Dataset columns (one row per extracted function):

| column | type |
|---|---|
| repo | string (lengths 7–54) |
| path | string (lengths 4–192) |
| url | string (lengths 87–284) |
| code | string (lengths 78–104k) |
| code_tokens | sequence |
| docstring | string (lengths 1–46.9k) |
| docstring_tokens | sequence |
| language | string (1 class) |
| partition | string (3 classes) |

Each record below is listed as `repo | path | url | language | partition`, followed by the function source.
manns/pyspread | pyspread/src/lib/vlc.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L5130-L5138 | python | train

```python
def libvlc_media_player_get_nsobject(p_mi):
    '''Get the NSView handler previously set with L{libvlc_media_player_set_nsobject}().
    @param p_mi: the Media Player.
    @return: the NSView handler or 0 if none were set.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_nsobject', None) or \
        _Cfunction('libvlc_media_player_get_nsobject', ((1,),), None,
                   ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
```
facelessuser/wcmatch | wcmatch/wcmatch.py | https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/wcmatch.py#L214-L258 | python | train

```python
def _walk(self):
    """Start search for valid files."""

    self._base_len = len(self.base)

    for base, dirs, files in os.walk(self.base, followlinks=self.follow_links):
        # Remove child folders based on exclude rules
        for name in dirs[:]:
            try:
                if not self._valid_folder(base, name):
                    dirs.remove(name)
            except Exception:
                dirs.remove(name)
                value = self.on_error(base, name)
                if value is not None:  # pragma: no cover
                    yield value

            if self._abort:
                break

        # Search files if they were found
        if len(files):
            # Only search files that are in the include rules
            for name in files:
                try:
                    valid = self._valid_file(base, name)
                except Exception:
                    valid = False
                    value = self.on_error(base, name)
                    if value is not None:
                        yield value

                if valid:
                    yield self.on_match(base, name)
                else:
                    self._skipped += 1
                    value = self.on_skip(base, name)
                    if value is not None:
                        yield value

                if self._abort:
                    break

        if self._abort:
            break
```
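The key trick in `_walk` is mutating `dirs` in place while `os.walk` is iterating, which prunes entire subtrees before they are visited. A minimal standalone sketch of that same pattern, independent of wcmatch's internals (the `excluded` set and start path are invented example values):

```python
import os

def walk_filtered(start, excluded):
    """Yield file paths under `start`, skipping excluded directory names."""
    for base, dirs, files in os.walk(start):
        for name in dirs[:]:       # iterate over a copy...
            if name in excluded:
                dirs.remove(name)  # ...so removing from the real list prunes descent
        for name in files:
            yield os.path.join(base, name)

for path in walk_filtered(".", {".git", "__pycache__"}):
    print(path)
```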
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L757-L835 | python | train

```python
def visualize(self, numPoints=100):
    """
    Visualization utility for models.

    Helps to debug the math and logic.
    Helps to monitor complex objects with difficult to define boundaries.
    Only supports 3-dimensional objects.

    TODO: center the objects using scale, rotate and translate operations on mesh objects.
    """
    try:
        import pyqtgraph as pg
        import pyqtgraph.multiprocess as mp
        import pyqtgraph.opengl as gl
    except ImportError as e:
        print("PyQtGraph needs to be installed.")
        return (None, None, None, None, None)

    class PlyVisWindow:
        """
        The pyqtgraph visualization utility window class.

        Creates a remote process with a viewbox frame for visualizations.
        Provides access to mesh and scatter for realtime updates to the view.
        """
        def __init__(self):
            self.proc = mp.QtProcess()
            self.rpg = self.proc._import('pyqtgraph')
            self.rgl = self.proc._import('pyqtgraph.opengl')
            self.rview = self.rgl.GLViewWidget()
            self.rview.setBackgroundColor('k')
            self.rview.setCameraPosition(distance=10)
            self.grid = self.rgl.GLGridItem()
            self.rview.addItem(self.grid)
            self.rpg.setConfigOption('background', 'w')
            self.rpg.setConfigOption('foreground', 'k')

        def snapshot(self, name=""):
            """
            Utility to grab a frame of the visualization window.

            @param name (string) helps to avoid overwriting grabbed images programmatically.
            """
            self.rview.grabFrameBuffer().save("{}.png".format(name))

    # We might need this for future purposes -- don't delete.
    # class MeshUpdate:
    #     def __init__(self, proc):
    #         self.data_x = proc.transfer([])
    #         self.data_y = proc.transfer([])
    #         self._t = None
    #     @property
    #     def t(self):
    #         return self._t
    #     def update(self, x):
    #         self.data_y.extend([x], _callSync='async')
    #         self.data_x.extend([self.t], _callSync='async',)
    #         self.curve.setData(y=self.data_y, _callSync='async')

    pg.mkQApp()
    self.graphicsWindow = PlyVisWindow()
    self.graphicsWindow.rview.setWindowTitle(self.file)
    vertices = self.vertices.data
    vertices = np.array(vertices.tolist())
    faces = np.array([self.faces[i]['vertex_indices'] for i in range(self.faces.count)])
    self.mesh = self.graphicsWindow.rgl.GLMeshItem(vertexes=vertices, faces=faces,
                                                   shader='normalColor', drawEdges=True,
                                                   drawFaces=True, computeNormals=False,
                                                   smooth=False)
    self.graphicsWindow.rview.addItem(self.mesh)
    self.graphicsWindow.rview.show()
    pos = np.empty((numPoints, 3))
    size = np.ones((numPoints,))
    color = np.ones((numPoints, 4))
    self.scatter = self.graphicsWindow.rgl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=True)
    self.graphicsWindow.rview.addItem(self.scatter)
    return self.scatter, self.mesh, pos, size, color
```
blockstack/blockstack-core | blockstack/lib/subdomains.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L850-L929 | python | train

```python
def index_discovered_zonefiles(self, lastblock):
    """
    Go through the list of zone files we discovered via Atlas, grouped by name and ordered by block height.
    Find all subsequent zone files for this name, and process all subdomain operations contained within them.
    """
    all_queued_zfinfos = []        # contents of the queue
    subdomain_zonefile_infos = {}  # map subdomain fqn to list of zonefile info bundles, for process_subdomains
    name_blocks = {}               # map domain name to the block at which we should reprocess its subsequent zone files

    offset = 0
    while True:
        queued_zfinfos = queuedb_findall(self.subdomain_queue_path, "zonefiles", limit=100, offset=offset)
        if len(queued_zfinfos) == 0:
            # done!
            break

        offset += 100
        all_queued_zfinfos += queued_zfinfos

        if len(all_queued_zfinfos) >= 1000:
            # only do so many zone files per block, so we don't stall the node
            break

    log.debug("Discovered {} zonefiles".format(len(all_queued_zfinfos)))

    for queued_zfinfo in all_queued_zfinfos:
        zfinfo = json.loads(queued_zfinfo['data'])

        zonefile_hash = zfinfo['zonefile_hash']
        block_height = zfinfo['block_height']

        # find out the names that sent this zone file at this block
        zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash, block_height=block_height, path=self.atlasdb_path)
        if zfinfos is None:
            log.warn("Absent zonefile {}".format(zonefile_hash))
            continue

        # find out, for each name, the block height at which its zone file was discovered.
        # this is where we'll begin looking for more subdomain updates.
        for zfi in zfinfos:
            if zfi['name'] not in name_blocks:
                name_blocks[zfi['name']] = block_height
            else:
                name_blocks[zfi['name']] = min(block_height, name_blocks[zfi['name']])

    for name in name_blocks:
        if name_blocks[name] >= lastblock:
            continue

        log.debug("Finding subdomain updates for {} at block {}".format(name, name_blocks[name]))

        # get the subdomains affected at this block by finding the zonefiles created here.
        res = self.find_zonefile_subdomains(name_blocks[name], lastblock, name=name)
        zonefile_subdomain_info = res['zonefile_info']
        subdomain_index = res['subdomains']

        # for each subdomain, find the list of zonefiles that contain records for it
        for fqn in subdomain_index:
            if fqn not in subdomain_zonefile_infos:
                subdomain_zonefile_infos[fqn] = []

            for i in subdomain_index[fqn]:
                subdomain_zonefile_infos[fqn].append(zonefile_subdomain_info[i])

    processed = []
    for fqn in subdomain_zonefile_infos:
        subseq = filter(lambda szi: szi['zonefile_hash'] not in processed, subdomain_zonefile_infos[fqn])
        if len(subseq) == 0:
            continue

        log.debug("Processing {} zone file entries found for {} and others".format(len(subseq), fqn))
        subseq.sort(cmp=lambda z1, z2: -1 if z1['block_height'] < z2['block_height'] else 0 if z1['block_height'] == z2['block_height'] else 1)

        self.process_subdomains(subseq)
        processed += [szi['zonefile_hash'] for szi in subseq]

    # clear queue
    queuedb_removeall(self.subdomain_queue_path, all_queued_zfinfos)
    return True
```
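Note that `subseq.sort(cmp=...)` and the list-returning `filter(...)` above are Python 2 idioms. Under Python 3 the same ascending-by-block-height ordering would be written with a key function; the sample records here are invented for illustration:

```python
from operator import itemgetter

subseq = [
    {'zonefile_hash': 'b', 'block_height': 510},
    {'zonefile_hash': 'a', 'block_height': 500},
]
# Python 3 equivalent of the cmp= comparator: sort ascending by block height.
subseq.sort(key=itemgetter('block_height'))
print([s['zonefile_hash'] for s in subseq])  # ['a', 'b']
```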
pantsbuild/pants | src/python/pants/pantsd/process_manager.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/process_manager.py#L565-L580 | python | train

```python
def parse_fingerprint(self, cmdline, key=None, sep=None):
    """Given a psutil.Process.cmdline, parse and return a fingerprint.

    :param list cmdline: The psutil.Process.cmdline of the current process.
    :param string key: The key for fingerprint discovery.
    :param string sep: The key/value separator for fingerprint discovery.
    :returns: The parsed fingerprint or `None`.
    :rtype: string or `None`
    """
    key = key or self.FINGERPRINT_CMD_KEY
    if key:
        sep = sep or self.FINGERPRINT_CMD_SEP
        cmdline = cmdline or []
        for cmd_part in cmdline:
            if cmd_part.startswith('{}{}'.format(key, sep)):
                return cmd_part.split(sep)[1]
```
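A standalone version of the same parse makes the behaviour easy to check. The key and command line below are made up; in pants the defaults come from `FINGERPRINT_CMD_KEY` and `FINGERPRINT_CMD_SEP`:

```python
def parse_fingerprint(cmdline, key='--fingerprint', sep='='):
    """Return the value of the first `key<sep>value` argument, or None."""
    for cmd_part in cmdline or []:
        if cmd_part.startswith('{}{}'.format(key, sep)):
            return cmd_part.split(sep)[1]

cmdline = ['/usr/bin/java', '--fingerprint=0f1e2d', '-jar', 'app.jar']
print(parse_fingerprint(cmdline))  # 0f1e2d
print(parse_fingerprint(None))    # None
```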
elastic/apm-agent-python | elasticapm/processors.py | https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/processors.py#L212-L229 | python | train

```python
def sanitize_http_request_body(client, event):
    """
    Sanitizes http request body. This only works if the request body
    is a query-encoded string. Other types (e.g. JSON) are not handled by
    this sanitizer.

    :param client: an ElasticAPM client
    :param event: a transaction or error event
    :return: The modified event
    """
    try:
        body = force_text(event["context"]["request"]["body"], errors="replace")
    except (KeyError, TypeError):
        return event
    if "=" in body:
        sanitized_query_string = _sanitize_string(body, "&", "=")
        event["context"]["request"]["body"] = sanitized_query_string
    return event
```
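The heavy lifting happens in `_sanitize_string(body, "&", "=")`, which is not shown in this record. A hedged sketch of what such a helper might do for query-encoded bodies; the sensitive-field list and mask value are illustrative, not elastic-apm's actual configuration:

```python
MASK = '[REDACTED]'
SENSITIVE = {'password', 'passwd', 'token', 'secret'}

def sanitize_query_string(body, item_sep='&', kv_sep='='):
    """Mask the values of sensitive keys in a query-encoded string."""
    parts = []
    for item in body.split(item_sep):
        key, sep, value = item.partition(kv_sep)
        if sep and key.lower() in SENSITIVE:
            value = MASK
        parts.append(key + sep + value)
    return item_sep.join(parts)

print(sanitize_query_string('user=bob&password=hunter2'))
# user=bob&password=[REDACTED]
```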
nats-io/python-nats | nats/io/client.py | https://github.com/nats-io/python-nats/blob/4a409319c409e7e55ce8377b64b406375c5f455b/nats/io/client.py#L495-L514 | python | train

```python
def _flush_timeout(self, timeout):
    """
    Takes a timeout and sets up a future which will return True
    once the server responds back, otherwise raises a TimeoutError.
    """
    future = tornado.concurrent.Future()
    yield self._send_ping(future)
    try:
        result = yield tornado.gen.with_timeout(
            timedelta(seconds=timeout), future)
    except tornado.gen.TimeoutError:
        # Set the future to False so it can be ignored in _process_pong,
        # and try to remove it from the list of pending pongs.
        future.set_result(False)
        for i, pong_future in enumerate(self._pongs):
            if pong_future == future:
                del self._pongs[i]
                break
        raise
    raise tornado.gen.Return(result)
```
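The core pattern here is waiting on a future with a deadline via `tornado.gen.with_timeout`. A minimal self-contained sketch of that wait-with-deadline pattern, written against the generator-coroutine API of Tornado 4/5 (the same vintage this client targets); the pre-resolved future stands in for a PONG that has already arrived:

```python
from datetime import timedelta

import tornado.concurrent
import tornado.gen
import tornado.ioloop

@tornado.gen.coroutine
def wait_for(future, timeout):
    """Resolve `future` within `timeout` seconds, or return False."""
    try:
        result = yield tornado.gen.with_timeout(timedelta(seconds=timeout), future)
    except tornado.gen.TimeoutError:
        raise tornado.gen.Return(False)
    raise tornado.gen.Return(result)

future = tornado.concurrent.Future()
future.set_result(True)  # pretend the server already responded
loop = tornado.ioloop.IOLoop.current()
print(loop.run_sync(lambda: wait_for(future, 1.0)))  # True
```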
Robpol86/terminaltables | example1.py | https://github.com/Robpol86/terminaltables/blob/ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc/example1.py#L18-L38 | python | train

```python
def main():
    """Main function."""
    title = 'Jetta SportWagen'

    # AsciiTable.
    table_instance = AsciiTable(TABLE_DATA, title)
    table_instance.justify_columns[2] = 'right'
    print(table_instance.table)
    print()

    # SingleTable.
    table_instance = SingleTable(TABLE_DATA, title)
    table_instance.justify_columns[2] = 'right'
    print(table_instance.table)
    print()

    # DoubleTable.
    table_instance = DoubleTable(TABLE_DATA, title)
    table_instance.justify_columns[2] = 'right'
    print(table_instance.table)
    print()
```
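`TABLE_DATA` is defined elsewhere in example1.py; a plausible stand-in (a list of rows, first row treated as the header) makes the snippet runnable on its own:

```python
from terminaltables import AsciiTable

# Stand-in data; the real example1.py defines its own TABLE_DATA.
TABLE_DATA = [
    ['Name', 'Color', 'Type'],
    ['Avocado', 'green', 'nut'],
    ['Cupcake', 'yellow', 'sugar'],
]
table = AsciiTable(TABLE_DATA, 'Example')
table.justify_columns[2] = 'right'  # right-align the third column
print(table.table)
```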
belbio/bel | bel/db/arangodb.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L247-L279 | python | train

```python
def get_belapi_handle(client, username=None, password=None):
    """Get BEL API arango db handle"""

    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "belapi"
    try:
        if username and password:
            belapi_db = sys_db.create_database(
                name=belapi_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belapi_db = sys_db.create_database(name=belapi_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belapi_db = client.db(belapi_db_name, username=username, password=password)
        else:
            belapi_db = client.db(belapi_db_name)

    try:
        belapi_db.create_collection(belapi_settings_name)
    except Exception:
        pass

    try:
        belapi_db.create_collection(belapi_statemgmt_name)
    except Exception:
        pass

    return belapi_db
```
Nachtfeuer/pipeline | spline/tools/decorators.py | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/decorators.py#L25-L53 | python | train

```python
def singleton(the_class):
    """
    Decorator for a class to make a singleton out of it.

    @type the_class: class
    @param the_class: the class that should work as a singleton
    @rtype: decorator
    @return: decorator
    """
    class_instances = {}

    def get_instance(*args, **kwargs):
        """
        Create or just return the one and only class instance.

        The singleton depends on the parameters used in __init__.
        @type args: list
        @param args: positional arguments of the constructor.
        @type kwargs: dict
        @param kwargs: named parameters of the constructor.
        @rtype: decorated class type
        @return: singleton instance of decorated class.
        """
        key = (the_class, args, str(kwargs))
        if key not in class_instances:
            class_instances[key] = the_class(*args, **kwargs)
        return class_instances[key]

    return get_instance
```
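A usage sketch for the decorator above (assuming it is in scope): because the cache key includes the constructor arguments, you get one shared instance per distinct argument combination rather than one per class:

```python
@singleton
class Config:
    def __init__(self, path):
        self.path = path

a = Config('/etc/app.cfg')
b = Config('/etc/app.cfg')
c = Config('/tmp/other.cfg')
assert a is b        # same arguments -> same instance
assert a is not c    # different arguments -> a second instance
```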
TankerHQ/python-cli-ui | cli_ui/__init__.py | https://github.com/TankerHQ/python-cli-ui/blob/4c9928827cea06cf80e6a1f5bd86478d8566863f/cli_ui/__init__.py#L328-L337 | python | train

```python
def dot(*, last: bool = False, fileobj: Any = None) -> None:
    """ Print a dot without a newline unless it is the last one.

    Useful when you want to display progress with very little
    knowledge.

    :param last: whether this is the last dot (will insert a newline)
    """
    end = "\n" if last else ""
    info(".", end=end, fileobj=fileobj)
```
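Typical use (requires the `cli_ui` package this function ships in): print one dot per processed item and pass `last=True` on the final one to terminate the line.

```python
import cli_ui

items = list(range(10))
for i, item in enumerate(items):
    # ... process item ...
    cli_ui.dot(last=(i == len(items) - 1))
```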
codelv/enaml-native | src/enamlnative/android/android_drawer_layout.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_drawer_layout.py#L75-L90 | python | train

```python
def init_widget(self):
    """ Initialize the underlying widget.

    """
    super(AndroidDrawerLayout, self).init_widget()
    d = self.declaration
    if d.title:
        self.set_title(d.title)
    if d.drawer_elevation:
        self.set_drawer_elevation(d.drawer_elevation)
    if d.lock_mode:
        self.set_lock_mode(d.lock_mode)
    if d.scrim_color:
        self.set_scrim_color(d.scrim_color)
    if d.status_bar_background_color:
        self.set_status_bar_background_color(d.status_bar_background_color)
```
ArduPilot/MAVProxy | MAVProxy/modules/lib/grapher.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/grapher.py#L192-L332 | python | train

```python
def plotit(self, x, y, fields, colors=[], title=None):
    '''plot a set of graphs using date for x axis'''
    pylab.ion()
    self.fig = pylab.figure(num=1, figsize=(12, 6))
    self.ax1 = self.fig.gca()
    ax2 = None

    for i in range(0, len(fields)):
        if len(x[i]) == 0:
            continue
        if self.lowest_x is None or x[i][0] < self.lowest_x:
            self.lowest_x = x[i][0]
        if self.highest_x is None or x[i][-1] > self.highest_x:
            self.highest_x = x[i][-1]
    if self.highest_x is None or self.lowest_x is None:
        return

    self.formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
    if not self.xaxis:
        self.setup_xrange(self.highest_x - self.lowest_x)
        self.ax1.xaxis.set_major_formatter(self.formatter)
        self.ax1.callbacks.connect('xlim_changed', self.xlim_changed)
    self.fig.canvas.mpl_connect('draw_event', self.draw_event)

    empty = True
    ax1_labels = []
    ax2_labels = []

    for i in range(0, len(fields)):
        if len(x[i]) == 0:
            # print("Failed to find any values for field %s" % fields[i])
            continue
        if i < len(colors):
            color = colors[i]
        else:
            color = 'red'
        (tz, tzdst) = time.tzname

        if self.axes[i] == 2:
            if ax2 is None:
                ax2 = self.ax1.twinx()
                ax2.format_coord = self.make_format(ax2, self.ax1)
            ax = ax2
            if not self.xaxis:
                ax2.xaxis.set_major_locator(self.locator)
                ax2.xaxis.set_major_formatter(self.formatter)
            label = fields[i]
            if label.endswith(":2"):
                label = label[:-2]
            ax2_labels.append(label)
        else:
            ax1_labels.append(fields[i])
            ax = self.ax1

        if self.xaxis:
            if self.marker is not None:
                marker = self.marker
            else:
                marker = '+'
            if self.linestyle is not None:
                linestyle = self.linestyle
            else:
                linestyle = 'None'
            ax.plot(x[i], y[i], color=color, label=fields[i],
                    linestyle=linestyle, marker=marker)
        else:
            if self.marker is not None:
                marker = self.marker
            else:
                marker = 'None'
            if self.linestyle is not None:
                linestyle = self.linestyle
            else:
                linestyle = '-'
            if len(y[i]) > 0 and isinstance(y[i][0], str):
                # assume this is a piece of text to be rendered at a point in time
                last_text_time = -1
                last_text = None
                for n in range(0, len(x[i])):
                    this_text_time = round(x[i][n], 6)
                    this_text = y[i][n]
                    if last_text is None:
                        last_text = "[y" + this_text + "]"
                        last_text_time = this_text_time
                    elif this_text_time == last_text_time:
                        last_text += ("[x" + this_text + "]")
                    else:
                        ax.text(last_text_time,
                                10,
                                last_text,
                                rotation=90,
                                alpha=0.3,
                                verticalalignment='baseline')
                        last_text = this_text
                        last_text_time = this_text_time
                if last_text is not None:
                    ax.text(last_text_time, 10, last_text,
                            rotation=90,
                            alpha=0.3,
                            verticalalignment='baseline')
            else:
                ax.plot_date(x[i], y[i], color=color, label=fields[i],
                             linestyle=linestyle, marker=marker, tz=None)

        empty = False

    if self.show_flightmode:
        alpha = 0.3
        xlim = self.ax1.get_xlim()
        for i in range(len(self.flightmode_list)):
            (mode_name, t0, t1) = self.flightmode_list[i]
            c = self.flightmode_colour(mode_name)
            tday0 = self.timestamp_to_days(t0)
            tday1 = self.timestamp_to_days(t1)
            if tday0 > xlim[1] or tday1 < xlim[0]:
                continue
            tday0 = max(tday0, xlim[0])
            tday1 = min(tday1, xlim[1])
            self.ax1.axvspan(tday0, tday1, fc=c, ec=edge_colour, alpha=alpha)
            self.modes_plotted[mode_name] = (c, alpha)

    if empty:
        print("No data to graph")
        return

    if title is not None:
        pylab.title(title)
    if self.show_flightmode:
        mode_patches = []
        for mode in self.modes_plotted.keys():
            (color, alpha) = self.modes_plotted[mode]
            mode_patches.append(matplotlib.patches.Patch(color=color,
                                                         label=mode, alpha=alpha * 1.5))
        labels = [patch.get_label() for patch in mode_patches]
        if ax1_labels != []:
            patches_legend = matplotlib.pyplot.legend(mode_patches, labels, loc=self.legend_flightmode)
            self.fig.gca().add_artist(patches_legend)
        else:
            pylab.legend(mode_patches, labels)

    if ax1_labels != []:
        self.ax1.legend(ax1_labels, loc=self.legend)
    if ax2_labels != []:
        ax2.legend(ax2_labels, loc=self.legend2)
```
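The flight-mode shading in `plotit` reduces to a simple matplotlib pattern: one translucent `axvspan` per (mode, start, end) interval, with `Patch` handles collected for a legend. A self-contained sketch of just that idea; the mode names, times, and colours are invented:

```python
import matplotlib
matplotlib.use('Agg')  # render off-screen
import matplotlib.pyplot as plt
from matplotlib.patches import Patch

modes = [('MANUAL', 0, 4, 'tab:blue'), ('AUTO', 4, 9, 'tab:green')]

fig, ax = plt.subplots()
ax.plot(range(10), [t * t for t in range(10)])  # some data to shade behind

patches = []
for name, t0, t1, colour in modes:
    ax.axvspan(t0, t1, fc=colour, alpha=0.3)            # shade the interval
    patches.append(Patch(color=colour, label=name, alpha=0.45))
ax.legend(handles=patches, loc='upper left')
fig.savefig('modes.png')
```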
kgori/treeCl | treeCl/plotter.py | https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/plotter.py#L31-L82 | python | train

```python
def heatmap(dm, partition=None, cmap=CM.Blues, fontsize=10):
    """ heatmap(dm, partition=None, cmap=CM.Blues, fontsize=10)

    Produce a 2D plot of the distance matrix, with values encoded by
    coloured cells.

    Args:
        partition: treeCl.Partition object - if supplied, will reorder
                   rows and columns of the distance matrix to reflect
                   the groups defined by the partition
        cmap: matplotlib colourmap object - the colour palette to use
        fontsize: int or None - sets the size of the locus labels

    Returns:
        matplotlib plottable object
    """
    assert isinstance(dm, DistanceMatrix)
    datamax = float(np.abs(dm.values).max())
    length = dm.shape[0]

    if partition:
        sorting = np.array(flatten_list(partition.get_membership()))
        new_dm = dm.reorder(dm.df.columns[sorting])
    else:
        new_dm = dm

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.xaxis.tick_top()
    ax.grid(False)

    tick_positions = np.array(list(range(length))) + 0.5
    if fontsize is not None:
        ax.set_yticks(tick_positions)
        ax.set_xticks(tick_positions)
        ax.set_xticklabels(new_dm.df.columns, rotation=90, fontsize=fontsize, ha='center')
        ax.set_yticklabels(new_dm.df.index, fontsize=fontsize, va='center')

    cbar_ticks_at = [0, 0.5 * datamax, datamax]
    cax = ax.imshow(
        new_dm.values,
        interpolation='nearest',
        extent=[0., length, length, 0.],
        vmin=0,
        vmax=datamax,
        cmap=cmap,
    )
    cbar = fig.colorbar(cax, ticks=cbar_ticks_at, format='%1.2g')
    cbar.set_label('Distance')
    return fig
```
pybel/pybel-tools | src/pybel_tools/selection/metapaths.py | https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/selection/metapaths.py#L37-L55 | python | valid

```python
def get_walks_exhaustive(graph, node, length):
    """Gets all walks under a given length starting at a given node

    :param networkx.Graph graph: A graph
    :param node: Starting node
    :param int length: The length of walks to get
    :return: A list of paths
    :rtype: list[tuple]
    """
    if 0 == length:
        return (node,),

    return tuple(
        (node, key) + path
        for neighbor in graph.edge[node]
        for path in get_walks_exhaustive(graph, neighbor, length - 1)
        if node not in path
        for key in graph.edge[node][neighbor]
    )
```
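Note that `graph.edge` is the networkx 1.x adjacency API. The same recursion works over a plain adjacency dict of shape `{node: {neighbor: {key: ...}}}`, which shows the idea without that dependency; the tiny graph below is made up:

```python
def walks(adj, node, length):
    """All simple walks of exactly `length` edges starting at `node`."""
    if length == 0:
        return ((node,),)
    return tuple(
        (node, key) + path
        for neighbor in adj[node]
        for path in walks(adj, neighbor, length - 1)
        if node not in path                 # drop walks that revisit the start
        for key in adj[node][neighbor]      # one walk per parallel edge key
    )

adj = {'a': {'b': {0: {}}}, 'b': {'c': {0: {}}, 'a': {0: {}}}, 'c': {}}
print(walks(adj, 'a', 2))  # (('a', 0, 'b', 0, 'c'),)
```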
mozilla/python-zeppelin | zeppelin/converters/markdown.py | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L60-L64 | python | train

````python
def build_code(self, lang, body):
    """Wrap text with markdown specific flavour."""
    self.out.append("```" + lang)
    self.build_markdown(lang, body)
    self.out.append("```")
````
ladybug-tools/ladybug | ladybug/analysisperiod.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/analysisperiod.py#L287-L294 | python | train

```python
def doys_int(self):
    """A sorted list of days of the year in this analysis period as integers."""
    if not self._is_reversed:
        return self._calc_daystamps(self.st_time, self.end_time)
    else:
        doys_st = self._calc_daystamps(self.st_time, DateTime.from_hoy(8759))
        doys_end = self._calc_daystamps(DateTime.from_hoy(0), self.end_time)
        return doys_st + doys_end
```
cltk/cltk | cltk/corpus/readers.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/readers.py#L295-L305 | python | train

```python
def words(self, fileids=None) -> Generator[str, str, None]:
    """
    Provide the words of the corpus; skipping any paragraphs flagged by keywords to the main
    class constructor

    :param fileids:
    :return: words, including punctuation, one by one
    """
    for sentence in self.sents(fileids):
        words = self._word_tokenizer.tokenize(sentence)
        for word in words:
            yield word
```
Ceasar/staticjinja | staticjinja/staticjinja.py | https://github.com/Ceasar/staticjinja/blob/57b8cac81da7fee3387510af4843e1bd1fd3ba28/staticjinja/staticjinja.py#L386-L399 | python | train

```python
def render_templates(self, templates, filepath=None):
    """Render a collection of :class:`jinja2.Template` objects.

    :param templates:
        A collection of Templates to render.

    :param filepath:
        Optional. A file or file-like object to dump the complete template
        stream into. Defaults to ``os.path.join(self.outpath,
        template.name)``.
    """
    for template in templates:
        self.render_template(template, filepath)
```
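In normal use you rarely call `render_templates` directly; a hedged end-to-end sketch under the assumption that this vintage of staticjinja exposes the `Site.make_site` entry point (the entry-point name has varied across staticjinja releases, and the paths here are illustrative):

```python
from staticjinja import Site

# Build a site from ./templates and write rendered pages into ./out.
site = Site.make_site(searchpath='templates', outpath='out')
site.render()  # internally renders each discovered template
```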
TissueMAPS/TmClient | src/python/tmclient/api.py | https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/api.py#L1927-L1955 | python | train

```python
def rename_mapobject_type(self, name, new_name):
    '''Renames a mapobject type.

    Parameters
    ----------
    name: str
        name of the mapobject type that should be renamed
    new_name: str
        name that should be given to the mapobject type

    See also
    --------
    :func:`tmserver.api.mapobject.update_mapobject_type`
    :class:`tmlib.models.mapobject.MapobjectType`
    '''
    logger.info(
        'rename mapobject type "%s" of experiment "%s"',
        name, self.experiment_name
    )
    content = {'name': new_name}
    mapobject_type_id = self._get_mapobject_type_id(name)
    url = self._build_api_url(
        '/experiments/{experiment_id}/mapobject_types/{mapobject_type_id}'.format(
            experiment_id=self._experiment_id,
            mapobject_type_id=mapobject_type_id
        )
    )
    res = self._session.put(url, json=content)
    res.raise_for_status()
```
bretth/woven | woven/linux.py | https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/linux.py#L244-L272 | def lsb_release():
"""
Get the Linux distribution information and return it in an attribute dict
The following attributes should be available:
base, distributor_id, description, release, codename
For example Ubuntu Lucid would return
base = debian
distributor_id = Ubuntu
description = Ubuntu 10.04.x LTS
release = 10.04
codename = lucid
"""
output = run('lsb_release -a').split('\n')
release = _AttributeDict({})
for line in output:
try:
key, value = line.split(':')
except ValueError:
continue
release[key.strip().replace(' ','_').lower()]=value.strip()
if exists('/etc/debian_version'): release.base = 'debian'
elif exists('/etc/redhat-release'): release.base = 'redhat'
else: release.base = 'unknown'
return release | [
"def",
"lsb_release",
"(",
")",
":",
"output",
"=",
"run",
"(",
"'lsb_release -a'",
")",
".",
"split",
"(",
"'\\n'",
")",
"release",
"=",
"_AttributeDict",
"(",
"{",
"}",
")",
"for",
"line",
"in",
"output",
":",
"try",
":",
"key",
",",
"value",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"except",
"ValueError",
":",
"continue",
"release",
"[",
"key",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
".",
"lower",
"(",
")",
"]",
"=",
"value",
".",
"strip",
"(",
")",
"if",
"exists",
"(",
"'/etc/debian_version'",
")",
":",
"release",
".",
"base",
"=",
"'debian'",
"elif",
"exists",
"(",
"'/etc/redhat-release'",
")",
":",
"release",
".",
"base",
"=",
"'redhat'",
"else",
":",
"release",
".",
"base",
"=",
"'unknown'",
"return",
"release"
] | Get the Linux distribution information and return it in an attribute dict
The following attributes should be available:
base, distributor_id, description, release, codename
For example Ubuntu Lucid would return
base = debian
distributor_id = Ubuntu
description = Ubuntu 10.04.x LTS
release = 10.04
codename = lucid | [
"Get",
"the",
"linux",
"distribution",
"information",
"and",
"return",
"in",
"an",
"attribute",
"dict",
"The",
"following",
"attributes",
"should",
"be",
"available",
":",
"base",
"distributor_id",
"description",
"release",
"codename",
"For",
"example",
"Ubuntu",
"Lucid",
"would",
"return",
"base",
"=",
"debian",
"distributor_id",
"=",
"Ubuntu",
"description",
"=",
"Ubuntu",
"10",
".",
"04",
".",
"x",
"LTS",
"release",
"=",
"10",
".",
"04",
"codename",
"=",
"lucid"
] | python | train |
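A local sketch of the parsing step above, with the remote fabric run() call replaced by canned lsb_release -a output and the attribute dict by a plain dict:

    sample = ("Distributor ID:\tUbuntu\n"
              "Description:\tUbuntu 10.04.4 LTS\n"
              "Release:\t10.04\n"
              "Codename:\tlucid")
    release = {}
    for line in sample.split("\n"):
        try:
            key, value = line.split(":")
        except ValueError:
            continue  # skip lines without a key: value pair
        release[key.strip().replace(" ", "_").lower()] = value.strip()
    print(release["distributor_id"], release["codename"])  # Ubuntu lucid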
Mindwerks/worldengine | worldengine/draw.py | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/draw.py#L234-L256 | def get_normalized_elevation_array(world):
''' Convert raw elevation into normalized values between 0 and 255,
and return a numpy array of these values '''
e = world.layers['elevation'].data
ocean = world.layers['ocean'].data
mask = numpy.ma.array(e, mask=ocean) # only land
min_elev_land = mask.min()
max_elev_land = mask.max()
elev_delta_land = max_elev_land - min_elev_land
mask = numpy.ma.array(e, mask=numpy.logical_not(ocean)) # only ocean
min_elev_sea = mask.min()
max_elev_sea = mask.max()
elev_delta_sea = max_elev_sea - min_elev_sea
c = numpy.empty(e.shape, dtype=numpy.float)
c[numpy.invert(ocean)] = (e[numpy.invert(ocean)] - min_elev_land) * 127 / elev_delta_land + 128
c[ocean] = (e[ocean] - min_elev_sea) * 127 / elev_delta_sea
c = numpy.rint(c).astype(dtype=numpy.int32) # proper rounding
return c | [
"def",
"get_normalized_elevation_array",
"(",
"world",
")",
":",
"e",
"=",
"world",
".",
"layers",
"[",
"'elevation'",
"]",
".",
"data",
"ocean",
"=",
"world",
".",
"layers",
"[",
"'ocean'",
"]",
".",
"data",
"mask",
"=",
"numpy",
".",
"ma",
".",
"array",
"(",
"e",
",",
"mask",
"=",
"ocean",
")",
"# only land",
"min_elev_land",
"=",
"mask",
".",
"min",
"(",
")",
"max_elev_land",
"=",
"mask",
".",
"max",
"(",
")",
"elev_delta_land",
"=",
"max_elev_land",
"-",
"min_elev_land",
"mask",
"=",
"numpy",
".",
"ma",
".",
"array",
"(",
"e",
",",
"mask",
"=",
"numpy",
".",
"logical_not",
"(",
"ocean",
")",
")",
"# only ocean",
"min_elev_sea",
"=",
"mask",
".",
"min",
"(",
")",
"max_elev_sea",
"=",
"mask",
".",
"max",
"(",
")",
"elev_delta_sea",
"=",
"max_elev_sea",
"-",
"min_elev_sea",
"c",
"=",
"numpy",
".",
"empty",
"(",
"e",
".",
"shape",
",",
"dtype",
"=",
"numpy",
".",
"float",
")",
"c",
"[",
"numpy",
".",
"invert",
"(",
"ocean",
")",
"]",
"=",
"(",
"e",
"[",
"numpy",
".",
"invert",
"(",
"ocean",
")",
"]",
"-",
"min_elev_land",
")",
"*",
"127",
"/",
"elev_delta_land",
"+",
"128",
"c",
"[",
"ocean",
"]",
"=",
"(",
"e",
"[",
"ocean",
"]",
"-",
"min_elev_sea",
")",
"*",
"127",
"/",
"elev_delta_sea",
"c",
"=",
"numpy",
".",
"rint",
"(",
"c",
")",
".",
"astype",
"(",
"dtype",
"=",
"numpy",
".",
"int32",
")",
"# proper rounding",
"return",
"c"
] | Convert raw elevation into normalized values between 0 and 255,
and return a numpy array of these values | [
"Convert",
"raw",
"elevation",
"into",
"normalized",
"values",
"between",
"0",
"and",
"255",
"and",
"return",
"a",
"numpy",
"array",
"of",
"these",
"values"
] | python | train |
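A self-contained run of the piecewise land/sea normalization above on toy data, making the two output ranges (sea 0..127, land 128..255) concrete:

    import numpy
    e = numpy.array([[-2.0, -1.0], [0.5, 3.0]])           # toy elevation
    ocean = numpy.array([[True, True], [False, False]])   # toy ocean mask
    land = numpy.ma.array(e, mask=ocean)    # ocean cells masked -> land stats
    sea = numpy.ma.array(e, mask=~ocean)    # land cells masked -> sea stats
    c = numpy.empty(e.shape, dtype=float)
    c[~ocean] = (e[~ocean] - land.min()) * 127 / (land.max() - land.min()) + 128
    c[ocean] = (e[ocean] - sea.min()) * 127 / (sea.max() - sea.min())
    print(numpy.rint(c).astype(numpy.int32))  # [[0 127] [128 255]]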
CiscoUcs/UcsPythonSDK | src/UcsSdk/UcsBase.py | https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L460-L502 | def LoadFromXml(self, node, handle):
""" Method updates/fills the object from the xml representation of the managed object. """
from Ucs import ClassFactory
self.SetHandle(handle)
if node.hasAttributes():
# attributes = node._get_attributes()
# attCount = attributes._get_length()
attributes = node.attributes
attCount = len(attributes)
for i in range(attCount):
attNode = attributes.item(i)
# attr = UcsUtils.WordU(attNode._get_name())
attr = UcsUtils.WordU(attNode.localName)
if (attr in UcsUtils.GetUcsPropertyMetaAttributeList(self.classId)):
atMeta = UcsUtils.GetUcsMethodMeta(self.classId, attr)
if ((atMeta.io == "Input") or (atMeta.isComplexType)):
continue
# self.setattr(attr, str(attNode.nodeValue))
self.setattr(attr, str(attNode.value))
# elif (attNode._get_name() in externalMethodAttrs):
# self.setattr(attNode._get_name(), str(attNode.nodeValue))
elif (attNode.localName in externalMethodAttrs):
self.setattr(attNode.localName, str(attNode.value))
if (node.hasChildNodes()):
# childList = node._get_childNodes()
# childCount = childList._get_length()
childList = node.childNodes
childCount = len(childList)
for i in range(childCount):
childNode = childList.item(i)
if (childNode.nodeType != Node.ELEMENT_NODE):
continue
cln = UcsUtils.WordU(childNode.localName)
if cln in UcsUtils.GetUcsPropertyMetaAttributeList(self.classId):
atMeta = UcsUtils.GetUcsMethodMeta(self.classId, cln)
if ((atMeta.io == "Output") and (atMeta.isComplexType)):
c = ClassFactory(atMeta.fieldType)
if (c != None):
self.setattr(cln, c)
c.LoadFromXml(childNode, handle) | [
"def",
"LoadFromXml",
"(",
"self",
",",
"node",
",",
"handle",
")",
":",
"from",
"Ucs",
"import",
"ClassFactory",
"self",
".",
"SetHandle",
"(",
"handle",
")",
"if",
"node",
".",
"hasAttributes",
"(",
")",
":",
"# attributes = node._get_attributes()",
"# attCount = attributes._get_length()",
"attributes",
"=",
"node",
".",
"attributes",
"attCount",
"=",
"len",
"(",
"attributes",
")",
"for",
"i",
"in",
"range",
"(",
"attCount",
")",
":",
"attNode",
"=",
"attributes",
".",
"item",
"(",
"i",
")",
"# attr = UcsUtils.WordU(attNode._get_name())",
"attr",
"=",
"UcsUtils",
".",
"WordU",
"(",
"attNode",
".",
"localName",
")",
"if",
"(",
"attr",
"in",
"UcsUtils",
".",
"GetUcsPropertyMetaAttributeList",
"(",
"self",
".",
"classId",
")",
")",
":",
"atMeta",
"=",
"UcsUtils",
".",
"GetUcsMethodMeta",
"(",
"self",
".",
"classId",
",",
"attr",
")",
"if",
"(",
"(",
"atMeta",
".",
"io",
"==",
"\"Input\"",
")",
"or",
"(",
"atMeta",
".",
"isComplexType",
")",
")",
":",
"continue",
"# self.setattr(attr, str(attNode.nodeValue))",
"self",
".",
"setattr",
"(",
"attr",
",",
"str",
"(",
"attNode",
".",
"value",
")",
")",
"# elif (attNode._get_name() in externalMethodAttrs):",
"#\tself.setattr(attNode._get_name(), str(attNode.nodeValue))",
"elif",
"(",
"attNode",
".",
"localName",
"in",
"externalMethodAttrs",
")",
":",
"self",
".",
"setattr",
"(",
"attNode",
".",
"localName",
",",
"str",
"(",
"attNode",
".",
"value",
")",
")",
"if",
"(",
"node",
".",
"hasChildNodes",
"(",
")",
")",
":",
"# childList = node._get_childNodes()",
"# childCount = childList._get_length()",
"childList",
"=",
"node",
".",
"childNodes",
"childCount",
"=",
"len",
"(",
"childList",
")",
"for",
"i",
"in",
"range",
"(",
"childCount",
")",
":",
"childNode",
"=",
"childList",
".",
"item",
"(",
"i",
")",
"if",
"(",
"childNode",
".",
"nodeType",
"!=",
"Node",
".",
"ELEMENT_NODE",
")",
":",
"continue",
"cln",
"=",
"UcsUtils",
".",
"WordU",
"(",
"childNode",
".",
"localName",
")",
"if",
"cln",
"in",
"UcsUtils",
".",
"GetUcsPropertyMetaAttributeList",
"(",
"self",
".",
"classId",
")",
":",
"atMeta",
"=",
"UcsUtils",
".",
"GetUcsMethodMeta",
"(",
"self",
".",
"classId",
",",
"cln",
")",
"if",
"(",
"(",
"atMeta",
".",
"io",
"==",
"\"Output\"",
")",
"and",
"(",
"atMeta",
".",
"isComplexType",
")",
")",
":",
"c",
"=",
"ClassFactory",
"(",
"atMeta",
".",
"fieldType",
")",
"if",
"(",
"c",
"!=",
"None",
")",
":",
"self",
".",
"setattr",
"(",
"cln",
",",
"c",
")",
"c",
".",
"LoadFromXml",
"(",
"childNode",
",",
"handle",
")"
] | Method updates/fills the object from the xml representation of the managed object. | [
"Method",
"updates",
"/",
"fills",
"the",
"object",
"from",
"the",
"xml",
"representation",
"of",
"the",
"managed",
"object",
"."
] | python | train |
koenedaele/skosprovider | skosprovider/registry.py | https://github.com/koenedaele/skosprovider/blob/7304a37953978ca8227febc2d3cc2b2be178f215/skosprovider/registry.py#L177-L201 | def get_all(self, **kwargs):
'''Get all concepts from all providers.
.. code-block:: python
# get all concepts in all providers.
registry.get_all()
# get all concepts in all providers.
# If possible, display the results with a Dutch label.
registry.get_all(language='nl')
:param string language: Optional. If present, it should be a
:term:`language-tag`. This language-tag is passed on to the
underlying providers and used when selecting the label to display
for each concept.
:returns: a list of :class:`dict`.
Each dict has two keys: id and concepts.
'''
kwarguments = {}
if 'language' in kwargs:
kwarguments['language'] = kwargs['language']
return [{'id': p.get_vocabulary_id(), 'concepts': p.get_all(**kwarguments)}
for p in self.providers.values()] | [
"def",
"get_all",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwarguments",
"=",
"{",
"}",
"if",
"'language'",
"in",
"kwargs",
":",
"kwarguments",
"[",
"'language'",
"]",
"=",
"kwargs",
"[",
"'language'",
"]",
"return",
"[",
"{",
"'id'",
":",
"p",
".",
"get_vocabulary_id",
"(",
")",
",",
"'concepts'",
":",
"p",
".",
"get_all",
"(",
"*",
"*",
"kwarguments",
")",
"}",
"for",
"p",
"in",
"self",
".",
"providers",
".",
"values",
"(",
")",
"]"
] | Get all concepts from all providers.
.. code-block:: python
# get all concepts in all providers.
registry.get_all()
# get all concepts in all providers.
# If possible, display the results with a Dutch label.
registry.get_all(language='nl')
:param string language: Optional. If present, it should be a
:term:`language-tag`. This language-tag is passed on to the
underlying providers and used when selecting the label to display
for each concept.
:returns: a list of :class:`dict`.
Each dict has two keys: id and concepts. | [
"Get",
"all",
"concepts",
"from",
"all",
"providers",
"."
] | python | valid |
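A minimal sketch of the fan-out in get_all above, with stub providers standing in for real skosprovider vocabularies:

    class StubProvider(object):
        def __init__(self, vid, concepts):
            self._vid, self._concepts = vid, concepts

        def get_vocabulary_id(self):
            return self._vid

        def get_all(self, **kwargs):
            return list(self._concepts)  # a real provider would honour language=

    providers = {"trees": StubProvider("TREES", ["larch", "oak"]),
                 "birds": StubProvider("BIRDS", ["gull"])}
    results = [{"id": p.get_vocabulary_id(), "concepts": p.get_all(language="nl")}
               for p in providers.values()]
    print(results)  # one {'id': ..., 'concepts': [...]} dict per provider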
HPCC-Cloud-Computing/CAL | calplus/client.py | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/client.py#L25-L82 | def Client(version=__version__, resource=None, provider=None, **kwargs):
"""Initialize client object based on given version.
    :params version: version of CAL, defined in setup.cfg
:params resource: resource type
(network, compute, object_storage, block_storage)
:params provider: provider object
:params cloud_config: cloud auth config
:params **kwargs: specific args for resource
:return: class Client
HOW-TO:
The simplest way to create a client instance is initialization::
>> from calplus import client
>> calplus = client.Client(version='1.0.0',
resource='compute',
provider=provider_object,
some_needed_args_for_ComputeClient)
"""
versions = _CLIENTS.keys()
if version not in versions:
raise exceptions.UnsupportedVersion(
'Unknown client version or subject'
)
if provider is None:
raise exceptions.ProviderNotDefined(
'Provider not defined for Client'
)
support_types = CONF.providers.driver_mapper.keys()
if provider.type not in support_types:
raise exceptions.ProviderTypeNotFound(
'Unknown provider.'
)
resources = _CLIENTS[version].keys()
if not resource:
raise exceptions.ResourceNotDefined(
'Resource not defined; choose one: compute, network,\
object_storage, block_storage.'
)
elif resource.lower() not in resources:
raise exceptions.ResourceNotFound(
'Unknown resource: compute, network,\
object_storage, block_storage.'
)
LOG.info('Instantiating {} client ({})' . format(resource, version))
return _CLIENTS[version][resource](
provider.type, provider.config, **kwargs) | [
"def",
"Client",
"(",
"version",
"=",
"__version__",
",",
"resource",
"=",
"None",
",",
"provider",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"versions",
"=",
"_CLIENTS",
".",
"keys",
"(",
")",
"if",
"version",
"not",
"in",
"versions",
":",
"raise",
"exceptions",
".",
"UnsupportedVersion",
"(",
"'Unknown client version or subject'",
")",
"if",
"provider",
"is",
"None",
":",
"raise",
"exceptions",
".",
"ProviderNotDefined",
"(",
"'Not define Provider for Client'",
")",
"support_types",
"=",
"CONF",
".",
"providers",
".",
"driver_mapper",
".",
"keys",
"(",
")",
"if",
"provider",
".",
"type",
"not",
"in",
"support_types",
":",
"raise",
"exceptions",
".",
"ProviderTypeNotFound",
"(",
"'Unknow provider.'",
")",
"resources",
"=",
"_CLIENTS",
"[",
"version",
"]",
".",
"keys",
"(",
")",
"if",
"not",
"resource",
":",
"raise",
"exceptions",
".",
"ResourceNotDefined",
"(",
"'Not define Resource, choose one: compute, network,\\\n object_storage, block_storage.'",
")",
"elif",
"resource",
".",
"lower",
"(",
")",
"not",
"in",
"resources",
":",
"raise",
"exceptions",
".",
"ResourceNotFound",
"(",
"'Unknow resource: compute, network,\\\n object_storage, block_storage.'",
")",
"LOG",
".",
"info",
"(",
"'Instantiating {} client ({})'",
".",
"format",
"(",
"resource",
",",
"version",
")",
")",
"return",
"_CLIENTS",
"[",
"version",
"]",
"[",
"resource",
"]",
"(",
"provider",
".",
"type",
",",
"provider",
".",
"config",
",",
"*",
"*",
"kwargs",
")"
] | Initialize client object based on given version.
:params version: version of CAL, defined in setup.cfg
:params resource: resource type
(network, compute, object_storage, block_storage)
:params provider: provider object
:params cloud_config: cloud auth config
:params **kwargs: specific args for resource
:return: class Client
HOW-TO:
The simplest way to create a client instance is initialization::
>> from calplus import client
>> calplus = client.Client(version='1.0.0',
resource='compute',
provider=provider_object,
some_needed_args_for_ComputeClient) | [
"Initialize",
"client",
"object",
"based",
"on",
"given",
"version",
"."
] | python | train |
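A toy re-creation of the factory's validation cascade above; the client table, supported-types set, and provider dict are placeholders, not the real calplus objects:

    _CLIENTS = {"1.0.0": {"compute": lambda ptype, conf, **kw: ("compute-client", ptype)}}
    SUPPORTED_TYPES = {"openstack", "amazon"}  # assumed driver_mapper keys

    def make_client(version, resource, provider):
        if version not in _CLIENTS:
            raise ValueError("Unknown client version")
        if provider is None or provider["type"] not in SUPPORTED_TYPES:
            raise ValueError("Unknown or missing provider")
        if resource not in _CLIENTS[version]:
            raise ValueError("Unknown resource")
        return _CLIENTS[version][resource](provider["type"], provider.get("config"))

    print(make_client("1.0.0", "compute", {"type": "openstack", "config": {}}))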
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py#L210-L227 | def nacm_rule_list_rule_rule_type_data_node_path(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
rule_list = ET.SubElement(nacm, "rule-list")
name_key = ET.SubElement(rule_list, "name")
name_key.text = kwargs.pop('name')
rule = ET.SubElement(rule_list, "rule")
name_key = ET.SubElement(rule, "name")
name_key.text = kwargs.pop('name')
rule_type = ET.SubElement(rule, "rule-type")
data_node = ET.SubElement(rule_type, "data-node")
path = ET.SubElement(data_node, "path")
path.text = kwargs.pop('path')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"nacm_rule_list_rule_rule_type_data_node_path",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"nacm",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"nacm\"",
",",
"xmlns",
"=",
"\"urn:ietf:params:xml:ns:yang:ietf-netconf-acm\"",
")",
"rule_list",
"=",
"ET",
".",
"SubElement",
"(",
"nacm",
",",
"\"rule-list\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"rule_list",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"rule",
"=",
"ET",
".",
"SubElement",
"(",
"rule_list",
",",
"\"rule\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"rule",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"rule_type",
"=",
"ET",
".",
"SubElement",
"(",
"rule",
",",
"\"rule-type\"",
")",
"data_node",
"=",
"ET",
".",
"SubElement",
"(",
"rule_type",
",",
"\"data-node\"",
")",
"path",
"=",
"ET",
".",
"SubElement",
"(",
"data_node",
",",
"\"path\"",
")",
"path",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'path'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
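A standalone sketch of the ElementTree construction this generated method performs, with literal placeholder values in place of the popped kwargs:

    import xml.etree.ElementTree as ET

    config = ET.Element("config")
    nacm = ET.SubElement(config, "nacm",
                         xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
    rule_list = ET.SubElement(nacm, "rule-list")
    ET.SubElement(rule_list, "name").text = "admin-rules"     # placeholder name
    rule = ET.SubElement(rule_list, "rule")
    ET.SubElement(rule, "name").text = "deny-interfaces"      # placeholder name
    rule_type = ET.SubElement(rule, "rule-type")
    data_node = ET.SubElement(rule_type, "data-node")
    ET.SubElement(data_node, "path").text = "/ietf-interfaces:interfaces"
    print(ET.tostring(config).decode())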
twisted/vertex | vertex/q2q.py | https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/q2q.py#L907-L927 | def _listen(self, protocols, From, description):
"""
Implementation of L{Listen}.
"""
# The peer is coming from a client-side representation of the user
# described by 'From', and talking *to* a server-side representation of
# the user described by 'From'.
self.verifyCertificateAllowed(From, From)
theirCert = Certificate.peerFromTransport(self.transport)
for protocolName in protocols:
if protocolName.startswith('.'):
raise VerifyError(
"Internal protocols are for server-server use _only_: %r" %
protocolName)
key = (From, protocolName)
value = (self, theirCert, description)
log.msg("%r listening for %r" % key)
self.listeningClient.append((key, value))
self.service.listeningClients.setdefault(key, []).append(value)
return {} | [
"def",
"_listen",
"(",
"self",
",",
"protocols",
",",
"From",
",",
"description",
")",
":",
"# The peer is coming from a client-side representation of the user",
"# described by 'From', and talking *to* a server-side representation of",
"# the user described by 'From'.",
"self",
".",
"verifyCertificateAllowed",
"(",
"From",
",",
"From",
")",
"theirCert",
"=",
"Certificate",
".",
"peerFromTransport",
"(",
"self",
".",
"transport",
")",
"for",
"protocolName",
"in",
"protocols",
":",
"if",
"protocolName",
".",
"startswith",
"(",
"'.'",
")",
":",
"raise",
"VerifyError",
"(",
"\"Internal protocols are for server-server use _only_: %r\"",
"%",
"protocolName",
")",
"key",
"=",
"(",
"From",
",",
"protocolName",
")",
"value",
"=",
"(",
"self",
",",
"theirCert",
",",
"description",
")",
"log",
".",
"msg",
"(",
"\"%r listening for %r\"",
"%",
"key",
")",
"self",
".",
"listeningClient",
".",
"append",
"(",
"(",
"key",
",",
"value",
")",
")",
"self",
".",
"service",
".",
"listeningClients",
".",
"setdefault",
"(",
"key",
",",
"[",
"]",
")",
".",
"append",
"(",
"value",
")",
"return",
"{",
"}"
] | Implementation of L{Listen}. | [
"Implementation",
"of",
"L",
"{",
"Listen",
"}",
"."
] | python | train |
gmr/tinman | tinman/auth/mixins.py | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/auth/mixins.py#L162-L184 | def github_request(self, path, callback, access_token=None,
post_args=None, **kwargs):
"""Make a request to the GitHub API, passing in the path, a callback,
the access token, optional post arguments and keyword arguments to be
added as values in the request body or URI
"""
url = self._API_URL + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(kwargs)
if all_args:
url += "?" + auth.urllib_parse.urlencode(all_args)
callback = self.async_callback(self._on_github_request, callback)
http = self._get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST",
user_agent='Tinman/Tornado',
body=auth.urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, user_agent='Tinman/Tornado', callback=callback) | [
"def",
"github_request",
"(",
"self",
",",
"path",
",",
"callback",
",",
"access_token",
"=",
"None",
",",
"post_args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"self",
".",
"_API_URL",
"+",
"path",
"all_args",
"=",
"{",
"}",
"if",
"access_token",
":",
"all_args",
"[",
"\"access_token\"",
"]",
"=",
"access_token",
"all_args",
".",
"update",
"(",
"kwargs",
")",
"if",
"all_args",
":",
"url",
"+=",
"\"?\"",
"+",
"auth",
".",
"urllib_parse",
".",
"urlencode",
"(",
"all_args",
")",
"callback",
"=",
"self",
".",
"async_callback",
"(",
"self",
".",
"_on_github_request",
",",
"callback",
")",
"http",
"=",
"self",
".",
"_get_auth_http_client",
"(",
")",
"if",
"post_args",
"is",
"not",
"None",
":",
"http",
".",
"fetch",
"(",
"url",
",",
"method",
"=",
"\"POST\"",
",",
"user_agent",
"=",
"'Tinman/Tornado'",
",",
"body",
"=",
"auth",
".",
"urllib_parse",
".",
"urlencode",
"(",
"post_args",
")",
",",
"callback",
"=",
"callback",
")",
"else",
":",
"http",
".",
"fetch",
"(",
"url",
",",
"user_agent",
"=",
"'Tinman/Tornado'",
",",
"callback",
"=",
"callback",
")"
] | Make a request to the GitHub API, passing in the path, a callback,
the access token, optional post arguments and keyword arguments to be
added as values in the request body or URI | [
"Make",
"a",
"request",
"to",
"the",
"GitHub",
"API",
"passing",
"in",
"the",
"path",
"a",
"callback",
"the",
"access",
"token",
"optional",
"post",
"arguments",
"and",
"keyword",
"arguments",
"to",
"be",
"added",
"as",
"values",
"in",
"the",
"request",
"body",
"or",
"URI"
] | python | train |
PmagPy/PmagPy | pmagpy/contribution_builder.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1154-L1182 | def remove_names(self, dtype):
"""
Remove unneeded name columns ('specimen'/'sample'/etc)
from the specified table.
Parameters
----------
dtype : str
Returns
---------
pandas DataFrame without the unneeded columns
Example
---------
Contribution.tables['specimens'].df = Contribution.remove_names('specimens')
# takes out 'location', 'site', and/or 'sample' columns from the
# specimens dataframe if those columns have been added
"""
if dtype not in self.ancestry:
return
if dtype in self.tables:
# remove extra columns here
self_ind = self.ancestry.index(dtype)
parent_ind = self_ind + 1 if self_ind < (len(self.ancestry) -1) else self_ind
remove = set(self.ancestry).difference([self.ancestry[self_ind], self.ancestry[parent_ind]])
remove = [dtype[:-1] for dtype in remove]
columns = self.tables[dtype].df.columns.difference(remove)
return self.tables[dtype].df[columns] | [
"def",
"remove_names",
"(",
"self",
",",
"dtype",
")",
":",
"if",
"dtype",
"not",
"in",
"self",
".",
"ancestry",
":",
"return",
"if",
"dtype",
"in",
"self",
".",
"tables",
":",
"# remove extra columns here",
"self_ind",
"=",
"self",
".",
"ancestry",
".",
"index",
"(",
"dtype",
")",
"parent_ind",
"=",
"self_ind",
"+",
"1",
"if",
"self_ind",
"<",
"(",
"len",
"(",
"self",
".",
"ancestry",
")",
"-",
"1",
")",
"else",
"self_ind",
"remove",
"=",
"set",
"(",
"self",
".",
"ancestry",
")",
".",
"difference",
"(",
"[",
"self",
".",
"ancestry",
"[",
"self_ind",
"]",
",",
"self",
".",
"ancestry",
"[",
"parent_ind",
"]",
"]",
")",
"remove",
"=",
"[",
"dtype",
"[",
":",
"-",
"1",
"]",
"for",
"dtype",
"in",
"remove",
"]",
"columns",
"=",
"self",
".",
"tables",
"[",
"dtype",
"]",
".",
"df",
".",
"columns",
".",
"difference",
"(",
"remove",
")",
"return",
"self",
".",
"tables",
"[",
"dtype",
"]",
".",
"df",
"[",
"columns",
"]"
] | Remove unneeded name columns ('specimen'/'sample'/etc)
from the specified table.
Parameters
----------
dtype : str
Returns
---------
pandas DataFrame without the unneeded columns
Example
---------
Contribution.tables['specimens'].df = Contribution.remove_names('specimens')
# takes out 'location', 'site', and/or 'sample' columns from the
# specimens dataframe if those columns have been added | [
"Remove",
"unneeded",
"name",
"columns",
"(",
"specimen",
"/",
"sample",
"/",
"etc",
")",
"from",
"the",
"specified",
"table",
"."
] | python | train |
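A toy pandas illustration of the column-pruning logic in remove_names above, showing that only the table's own name column and its parent's survive:

    import pandas as pd

    ancestry = ["specimens", "samples", "sites", "locations"]
    df = pd.DataFrame({"specimen": ["a"], "sample": ["s1"],
                       "site": ["x"], "location": ["loc1"]})
    dtype = "specimens"
    self_ind = ancestry.index(dtype)
    parent_ind = self_ind + 1 if self_ind < len(ancestry) - 1 else self_ind
    remove = set(ancestry) - {ancestry[self_ind], ancestry[parent_ind]}
    remove = [name[:-1] for name in remove]  # 'sites' -> 'site', etc.
    print(df[df.columns.difference(remove)])  # keeps 'sample' and 'specimen'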
blubberdiblub/eztemplate | eztemplate/__main__.py | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L403-L421 | def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
) | [
"def",
"perform_templating",
"(",
"args",
")",
":",
"engine",
"=",
"engines",
".",
"engines",
"[",
"args",
".",
"engine",
"]",
"if",
"args",
".",
"vary",
":",
"it",
"=",
"variable_outfile_iterator",
"(",
"args",
".",
"outfiles",
",",
"args",
".",
"infiles",
",",
"args",
".",
"args",
",",
"engine",
")",
"else",
":",
"it",
"=",
"constant_outfile_iterator",
"(",
"args",
".",
"outfiles",
",",
"args",
".",
"infiles",
",",
"args",
".",
"args",
")",
"process_combinations",
"(",
"it",
",",
"engine",
",",
"tolerant",
"=",
"args",
".",
"tolerant",
",",
"read_old",
"=",
"args",
".",
"read_old",
",",
"delete_empty",
"=",
"args",
".",
"delete_empty",
",",
")"
] | Perform templating according to the given arguments. | [
"Perform",
"templating",
"according",
"to",
"the",
"given",
"arguments",
"."
] | python | train |
nok/sklearn-porter | sklearn_porter/estimator/classifier/KNeighborsClassifier/__init__.py | https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/KNeighborsClassifier/__init__.py#L155-L178 | def predict(self, temp_type):
"""
Transpile the predict method.
Parameters
----------
:param temp_type : string
The kind of export type (embedded, separated, exported).
Returns
-------
:return : string
The transpiled predict method as string.
"""
# Exported:
if temp_type == 'exported':
temp = self.temp('exported.class')
return temp.format(class_name=self.class_name,
method_name=self.method_name,
n_features=self.n_features)
# Separated:
if temp_type == 'separated':
meth = self.create_method()
return self.create_class(meth) | [
"def",
"predict",
"(",
"self",
",",
"temp_type",
")",
":",
"# Exported:",
"if",
"temp_type",
"==",
"'exported'",
":",
"temp",
"=",
"self",
".",
"temp",
"(",
"'exported.class'",
")",
"return",
"temp",
".",
"format",
"(",
"class_name",
"=",
"self",
".",
"class_name",
",",
"method_name",
"=",
"self",
".",
"method_name",
",",
"n_features",
"=",
"self",
".",
"n_features",
")",
"# Separated:",
"if",
"temp_type",
"==",
"'separated'",
":",
"meth",
"=",
"self",
".",
"create_method",
"(",
")",
"return",
"self",
".",
"create_class",
"(",
"meth",
")"
] | Transpile the predict method.
Parameters
----------
:param temp_type : string
The kind of export type (embedded, separated, exported).
Returns
-------
:return : string
The transpiled predict method as string. | [
"Transpile",
"the",
"predict",
"method",
"."
] | python | train |
apache/airflow | airflow/models/taskinstance.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L1363-L1368 | def init_run_context(self, raw=False):
"""
Sets the log context.
"""
self.raw = raw
self._set_context(self) | [
"def",
"init_run_context",
"(",
"self",
",",
"raw",
"=",
"False",
")",
":",
"self",
".",
"raw",
"=",
"raw",
"self",
".",
"_set_context",
"(",
"self",
")"
] | Sets the log context. | [
"Sets",
"the",
"log",
"context",
"."
] | python | test |
saltstack/salt | salt/modules/lxc.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L4439-L4476 | def reboot(name, path=None):
'''
Reboot a container.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt 'minion' lxc.reboot myvm
'''
ret = {'result': True,
'changes': {},
'comment': '{0} rebooted'.format(name)}
does_exist = exists(name, path=path)
if does_exist and (state(name, path=path) == 'running'):
try:
stop(name, path=path)
except (SaltInvocationError, CommandExecutionError) as exc:
ret['comment'] = 'Unable to stop container: {0}'.format(exc)
ret['result'] = False
return ret
if does_exist and (state(name, path=path) != 'running'):
try:
start(name, path=path)
except (SaltInvocationError, CommandExecutionError) as exc:
ret['comment'] = 'Unable to stop container: {0}'.format(exc)
ret['result'] = False
return ret
ret['changes'][name] = 'rebooted'
return ret | [
"def",
"reboot",
"(",
"name",
",",
"path",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'result'",
":",
"True",
",",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"'{0} rebooted'",
".",
"format",
"(",
"name",
")",
"}",
"does_exist",
"=",
"exists",
"(",
"name",
",",
"path",
"=",
"path",
")",
"if",
"does_exist",
"and",
"(",
"state",
"(",
"name",
",",
"path",
"=",
"path",
")",
"==",
"'running'",
")",
":",
"try",
":",
"stop",
"(",
"name",
",",
"path",
"=",
"path",
")",
"except",
"(",
"SaltInvocationError",
",",
"CommandExecutionError",
")",
"as",
"exc",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Unable to stop container: {0}'",
".",
"format",
"(",
"exc",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"if",
"does_exist",
"and",
"(",
"state",
"(",
"name",
",",
"path",
"=",
"path",
")",
"!=",
"'running'",
")",
":",
"try",
":",
"start",
"(",
"name",
",",
"path",
"=",
"path",
")",
"except",
"(",
"SaltInvocationError",
",",
"CommandExecutionError",
")",
"as",
"exc",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Unable to stop container: {0}'",
".",
"format",
"(",
"exc",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'rebooted'",
"return",
"ret"
] | Reboot a container.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt 'minion' lxc.reboot myvm | [
"Reboot",
"a",
"container",
"."
] | python | train |
tanghaibao/jcvi | jcvi/compara/quota.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/quota.py#L64-L99 | def get_2D_overlap(chain, eclusters):
"""
Implements a sweep line algorithm, that has better running time than naive O(n^2):
assume block has x_ends, and y_ends for the bounds
1. sort x_ends, and take a sweep line to scan the x_ends
2. if left end, test y-axis intersection of current block with `active` set;
also put this block in the `active` set
3. if right end, remove block from the `active` set
"""
mergeables = Grouper()
active = set()
x_ends = []
for i, (range_x, range_y, score) in enumerate(eclusters):
chr, left, right = range_x
x_ends.append((chr, left, 0, i)) # 0/1 for left/right-ness
x_ends.append((chr, right, 1, i))
x_ends.sort()
chr_last = ""
for chr, pos, left_right, i in x_ends:
if chr != chr_last:
active.clear()
if left_right == 0:
active.add(i)
for x in active:
# check y-overlap
if range_overlap(eclusters[x][1], eclusters[i][1]):
mergeables.join(x, i)
else: # right end
active.remove(i)
chr_last = chr
return mergeables | [
"def",
"get_2D_overlap",
"(",
"chain",
",",
"eclusters",
")",
":",
"mergeables",
"=",
"Grouper",
"(",
")",
"active",
"=",
"set",
"(",
")",
"x_ends",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"range_x",
",",
"range_y",
",",
"score",
")",
"in",
"enumerate",
"(",
"eclusters",
")",
":",
"chr",
",",
"left",
",",
"right",
"=",
"range_x",
"x_ends",
".",
"append",
"(",
"(",
"chr",
",",
"left",
",",
"0",
",",
"i",
")",
")",
"# 0/1 for left/right-ness",
"x_ends",
".",
"append",
"(",
"(",
"chr",
",",
"right",
",",
"1",
",",
"i",
")",
")",
"x_ends",
".",
"sort",
"(",
")",
"chr_last",
"=",
"\"\"",
"for",
"chr",
",",
"pos",
",",
"left_right",
",",
"i",
"in",
"x_ends",
":",
"if",
"chr",
"!=",
"chr_last",
":",
"active",
".",
"clear",
"(",
")",
"if",
"left_right",
"==",
"0",
":",
"active",
".",
"add",
"(",
"i",
")",
"for",
"x",
"in",
"active",
":",
"# check y-overlap",
"if",
"range_overlap",
"(",
"eclusters",
"[",
"x",
"]",
"[",
"1",
"]",
",",
"eclusters",
"[",
"i",
"]",
"[",
"1",
"]",
")",
":",
"mergeables",
".",
"join",
"(",
"x",
",",
"i",
")",
"else",
":",
"# right end",
"active",
".",
"remove",
"(",
"i",
")",
"chr_last",
"=",
"chr",
"return",
"mergeables"
] | Implements a sweep line algorithm, that has better running time than naive O(n^2):
assume block has x_ends, and y_ends for the bounds
1. sort x_ends, and take a sweep line to scan the x_ends
2. if left end, test y-axis intersection of current block with `active` set;
also put this block in the `active` set
3. if right end, remove block from the `active` set | [
"Implements",
"a",
"sweep",
"line",
"algorithm",
"that",
"has",
"better",
"running",
"time",
"than",
"naive",
"O",
"(",
"n^2",
")",
":",
"assume",
"block",
"has",
"x_ends",
"and",
"y_ends",
"for",
"the",
"bounds"
] | python | train |
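A self-contained run of the sweep-line grouping above on toy clusters. Two simplifications: overlap is tested before the new block joins the active set (the original's self-join through Grouper is a harmless no-op), and all blocks share one chromosome, so the chr_last reset never fires; joined pairs are collected in a list instead of a Grouper:

    def range_overlap(a, b):
        a_chr, a_min, a_max = a
        b_chr, b_min, b_max = b
        return a_chr == b_chr and a_min <= b_max and b_min <= a_max

    # each cluster: ((chr, left, right) on x, (chr, left, right) on y, score)
    eclusters = [(("c1", 0, 10), ("c1", 0, 10), 1.0),
                 (("c1", 5, 15), ("c1", 8, 20), 1.0),    # y-overlaps cluster 0
                 (("c1", 30, 40), ("c1", 50, 60), 1.0)]  # isolated
    x_ends = []
    for i, (rx, ry, score) in enumerate(eclusters):
        chrom, left, right = rx
        x_ends.append((chrom, left, 0, i))   # 0 = left end
        x_ends.append((chrom, right, 1, i))  # 1 = right end
    x_ends.sort()
    active, joined = set(), []
    for chrom, pos, left_right, i in x_ends:
        if left_right == 0:
            for x in active:
                if range_overlap(eclusters[x][1], eclusters[i][1]):
                    joined.append((x, i))  # stand-in for mergeables.join(x, i)
            active.add(i)
        else:
            active.remove(i)
    print(joined)  # [(0, 1)]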
jobovy/galpy | galpy/orbit/Orbit.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/Orbit.py#L2321-L2352 | def dec(self,*args,**kwargs):
"""
NAME:
dec
PURPOSE:
return the declination
INPUT:
t - (optional) time at which to get dec (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
dec(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
out= self._orb.dec(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | [
"def",
"dec",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"self",
".",
"_orb",
".",
"dec",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"out",
")",
"==",
"1",
":",
"return",
"out",
"[",
"0",
"]",
"else",
":",
"return",
"out"
] | NAME:
dec
PURPOSE:
return the declination
INPUT:
t - (optional) time at which to get dec (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
dec(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU) | [
"NAME",
":"
] | python | train |
Pegase745/sqlalchemy-datatables | examples/pyramid_tut/pyramid_tut/views.py | https://github.com/Pegase745/sqlalchemy-datatables/blob/049ab5f98f20ad37926fe86d5528da0c91cd462d/examples/pyramid_tut/pyramid_tut/views.py#L12-L23 | def home(request):
"""Try to connect to database, and list available examples."""
try:
DBSession.query(User).first()
except DBAPIError:
return Response(
conn_err_msg,
content_type="text/plain",
status_int=500,
)
return {"project": "pyramid_tut"} | [
"def",
"home",
"(",
"request",
")",
":",
"try",
":",
"DBSession",
".",
"query",
"(",
"User",
")",
".",
"first",
"(",
")",
"except",
"DBAPIError",
":",
"return",
"Response",
"(",
"conn_err_msg",
",",
"content_type",
"=",
"\"text/plain\"",
",",
"status_int",
"=",
"500",
",",
")",
"return",
"{",
"\"project\"",
":",
"\"pyramid_tut\"",
"}"
] | Try to connect to database, and list available examples. | [
"Try",
"to",
"connect",
"to",
"database",
"and",
"list",
"available",
"examples",
"."
] | python | train |
pandas-dev/pandas | pandas/core/internals/blocks.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2477-L2506 | def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
"""
values = values.view('i8')
if isinstance(other, bool):
raise TypeError
elif is_null_datetimelike(other):
other = tslibs.iNaT
elif isinstance(other, (timedelta, np.timedelta64)):
other = Timedelta(other).value
elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError(other)
return values, other | [
"def",
"_try_coerce_args",
"(",
"self",
",",
"values",
",",
"other",
")",
":",
"values",
"=",
"values",
".",
"view",
"(",
"'i8'",
")",
"if",
"isinstance",
"(",
"other",
",",
"bool",
")",
":",
"raise",
"TypeError",
"elif",
"is_null_datetimelike",
"(",
"other",
")",
":",
"other",
"=",
"tslibs",
".",
"iNaT",
"elif",
"isinstance",
"(",
"other",
",",
"(",
"timedelta",
",",
"np",
".",
"timedelta64",
")",
")",
":",
"other",
"=",
"Timedelta",
"(",
"other",
")",
".",
"value",
"elif",
"hasattr",
"(",
"other",
",",
"'dtype'",
")",
"and",
"is_timedelta64_dtype",
"(",
"other",
")",
":",
"other",
"=",
"other",
".",
"astype",
"(",
"'i8'",
",",
"copy",
"=",
"False",
")",
".",
"view",
"(",
"'i8'",
")",
"else",
":",
"# coercion issues",
"# let higher levels handle",
"raise",
"TypeError",
"(",
"other",
")",
"return",
"values",
",",
"other"
] | Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other | [
"Coerce",
"values",
"and",
"other",
"to",
"int64",
"with",
"null",
"values",
"converted",
"to",
"iNaT",
".",
"values",
"is",
"always",
"ndarray",
"-",
"like",
"other",
"may",
"not",
"be"
] | python | train |
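A sketch of the i8 coercion branches above in isolation; numpy's timedelta64 conversion stands in for pandas' Timedelta(other).value:

    import numpy as np
    from datetime import timedelta

    values = np.array([1, 2], dtype="m8[ns]").view("i8")  # block values as i8
    other = timedelta(microseconds=3)
    if isinstance(other, bool):
        raise TypeError(other)
    elif isinstance(other, (timedelta, np.timedelta64)):
        other = np.timedelta64(other, "ns").astype("i8")  # ~ Timedelta(other).value
    print(values, int(other))  # [1 2] 3000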
pmacosta/peng | peng/wave_functions.py | https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/wave_functions.py#L1115-L1157 | def iffti(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the imaginary part of the inverse Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.iffti
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]]
"""
return imag(ifft(wave, npoints, indep_min, indep_max)) | [
"def",
"iffti",
"(",
"wave",
",",
"npoints",
"=",
"None",
",",
"indep_min",
"=",
"None",
",",
"indep_max",
"=",
"None",
")",
":",
"return",
"imag",
"(",
"ifft",
"(",
"wave",
",",
"npoints",
",",
"indep_min",
",",
"indep_max",
")",
")"
] | r"""
Return the imaginary part of the inverse Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.iffti
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]] | [
"r",
"Return",
"the",
"imaginary",
"part",
"of",
"the",
"inverse",
"Fast",
"Fourier",
"Transform",
"of",
"a",
"waveform",
"."
] | python | test |
manahl/arctic | arctic/tickstore/toplevel.py | https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/tickstore/toplevel.py#L119-L138 | def write(self, symbol, data):
"""
Split the tick data to the underlying collections and write the data to each low
level library.
Args:
symbol (str): the symbol for the timeseries data
data (list of dicts or pandas dataframe): Tick data to write
if a list of dicts is given the list must be in time order and the time must be stored in
an element named 'index' the value of which must be a timezone aware datetime.
For a pandas dataframe the index must be a datetime
"""
# get the full set of date ranges that we have
cursor = self._collection.find()
for res in cursor:
library = self._arctic_lib.arctic[res['library_name']]
dslice = self._slice(data, to_dt(res['start'], mktz('UTC')), to_dt(res['end'], mktz('UTC')))
if len(dslice) != 0:
library.write(symbol, dslice) | [
"def",
"write",
"(",
"self",
",",
"symbol",
",",
"data",
")",
":",
"# get the full set of date ranges that we have",
"cursor",
"=",
"self",
".",
"_collection",
".",
"find",
"(",
")",
"for",
"res",
"in",
"cursor",
":",
"library",
"=",
"self",
".",
"_arctic_lib",
".",
"arctic",
"[",
"res",
"[",
"'library_name'",
"]",
"]",
"dslice",
"=",
"self",
".",
"_slice",
"(",
"data",
",",
"to_dt",
"(",
"res",
"[",
"'start'",
"]",
",",
"mktz",
"(",
"'UTC'",
")",
")",
",",
"to_dt",
"(",
"res",
"[",
"'end'",
"]",
",",
"mktz",
"(",
"'UTC'",
")",
")",
")",
"if",
"len",
"(",
"dslice",
")",
"!=",
"0",
":",
"library",
".",
"write",
"(",
"symbol",
",",
"dslice",
")"
] | Split the tick data to the underlying collections and write the data to each low
level library.
Args:
symbol (str): the symbol for the timeseries data
data (list of dicts or pandas dataframe): Tick data to write
if a list of dicts is given the list must be in time order and the time must be stored in
an element named 'index' the value of which must be a timezone aware datetime.
For a pandas dataframe the index must be a datetime | [
"Split",
"the",
"tick",
"data",
"to",
"the",
"underlying",
"collections",
"and",
"write",
"the",
"data",
"to",
"each",
"low",
"level",
"library",
"."
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L7662-L7673 | def parse_buffer(self, s):
'''input some data bytes, possibly returning a list of new messages'''
m = self.parse_char(s)
if m is None:
return None
ret = [m]
while True:
m = self.parse_char("")
if m is None:
return ret
ret.append(m)
return ret | [
"def",
"parse_buffer",
"(",
"self",
",",
"s",
")",
":",
"m",
"=",
"self",
".",
"parse_char",
"(",
"s",
")",
"if",
"m",
"is",
"None",
":",
"return",
"None",
"ret",
"=",
"[",
"m",
"]",
"while",
"True",
":",
"m",
"=",
"self",
".",
"parse_char",
"(",
"\"\"",
")",
"if",
"m",
"is",
"None",
":",
"return",
"ret",
"ret",
".",
"append",
"(",
"m",
")",
"return",
"ret"
] | input some data bytes, possibly returning a list of new messages | [
"input",
"some",
"data",
"bytes",
"possibly",
"returning",
"a",
"list",
"of",
"new",
"messages"
] | python | train |
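A generic sketch of the drain-the-parser loop above, with the MAVLink parser replaced by a stub that frames on newlines:

    class LineParser(object):
        def __init__(self):
            self._buf = ""

        def parse_char(self, s):
            self._buf += s
            if "\n" in self._buf:
                msg, self._buf = self._buf.split("\n", 1)
                return msg
            return None

        def parse_buffer(self, s):
            m = self.parse_char(s)
            if m is None:
                return None
            ret = [m]
            while True:  # keep feeding "" until the buffer yields no message
                m = self.parse_char("")
                if m is None:
                    return ret
                ret.append(m)

    p = LineParser()
    print(p.parse_buffer("one\ntwo\nthr"))  # ['one', 'two']; 'thr' stays buffered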
iotile/coretools | iotilesensorgraph/iotile/sg/parser/statements/streamer_statement.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/parser/statements/streamer_statement.py#L62-L75 | def execute(self, sensor_graph, scope_stack):
"""Execute this statement on the sensor_graph given the current scope tree.
This adds a single DataStreamer to the current sensor graph
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
"""
streamer = DataStreamer(self.selector, self.dest, self.report_format, self.auto, report_type=self.report_type, with_other=self.with_other)
sensor_graph.add_streamer(streamer) | [
"def",
"execute",
"(",
"self",
",",
"sensor_graph",
",",
"scope_stack",
")",
":",
"streamer",
"=",
"DataStreamer",
"(",
"self",
".",
"selector",
",",
"self",
".",
"dest",
",",
"self",
".",
"report_format",
",",
"self",
".",
"auto",
",",
"report_type",
"=",
"self",
".",
"report_type",
",",
"with_other",
"=",
"self",
".",
"with_other",
")",
"sensor_graph",
".",
"add_streamer",
"(",
"streamer",
")"
] | Execute this statement on the sensor_graph given the current scope tree.
This adds a single DataStreamer to the current sensor graph
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources. | [
"Execute",
"this",
"statement",
"on",
"the",
"sensor_graph",
"given",
"the",
"current",
"scope",
"tree",
"."
] | python | train |
mathandy/svgpathtools | svgpathtools/path.py | https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L2188-L2203 | def continuous_subpaths(self):
"""Breaks self into its continuous components, returning a list of
continuous subpaths.
I.e.
(all(subpath.iscontinuous() for subpath in self.continuous_subpaths())
and self == concatpaths(self.continuous_subpaths()))
)
"""
subpaths = []
subpath_start = 0
for i in range(len(self) - 1):
if self[i].end != self[(i+1) % len(self)].start:
subpaths.append(Path(*self[subpath_start: i+1]))
subpath_start = i+1
subpaths.append(Path(*self[subpath_start: len(self)]))
return subpaths | [
"def",
"continuous_subpaths",
"(",
"self",
")",
":",
"subpaths",
"=",
"[",
"]",
"subpath_start",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
"-",
"1",
")",
":",
"if",
"self",
"[",
"i",
"]",
".",
"end",
"!=",
"self",
"[",
"(",
"i",
"+",
"1",
")",
"%",
"len",
"(",
"self",
")",
"]",
".",
"start",
":",
"subpaths",
".",
"append",
"(",
"Path",
"(",
"*",
"self",
"[",
"subpath_start",
":",
"i",
"+",
"1",
"]",
")",
")",
"subpath_start",
"=",
"i",
"+",
"1",
"subpaths",
".",
"append",
"(",
"Path",
"(",
"*",
"self",
"[",
"subpath_start",
":",
"len",
"(",
"self",
")",
"]",
")",
")",
"return",
"subpaths"
] | Breaks self into its continuous components, returning a list of
continuous subpaths.
I.e.
(all(subpath.iscontinuous() for subpath in self.continuous_subpaths())
and self == concatpaths(self.continuous_subpaths()))
) | [
"Breaks",
"self",
"into",
"its",
"continuous",
"components",
"returning",
"a",
"list",
"of",
"continuous",
"subpaths",
".",
"I",
".",
"e",
".",
"(",
"all",
"(",
"subpath",
".",
"iscontinuous",
"()",
"for",
"subpath",
"in",
"self",
".",
"continuous_subpaths",
"()",
")",
"and",
"self",
"==",
"concatpaths",
"(",
"self",
".",
"continuous_subpaths",
"()",
"))",
")"
] | python | train |
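A toy version of the split-on-discontinuity scan above, with segments reduced to (start, end) complex pairs and Path replaced by plain lists:

    segments = [(0j, 1j), (1j, 2j), (5j, 6j), (6j, 7j)]  # gap between 2j and 5j
    subpaths, subpath_start = [], 0
    for i in range(len(segments) - 1):
        if segments[i][1] != segments[(i + 1) % len(segments)][0]:
            subpaths.append(segments[subpath_start:i + 1])
            subpath_start = i + 1
    subpaths.append(segments[subpath_start:])
    print(subpaths)  # [[(0j, 1j), (1j, 2j)], [(5j, 6j), (6j, 7j)]]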
explosion/spaCy | examples/training/pretrain_textcat.py | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/pretrain_textcat.py#L41-L51 | def load_textcat_data(limit=0):
"""Load data from the IMDB dataset."""
# Partition off part of the train data for evaluation
train_data, eval_data = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
eval_texts, eval_labels = zip(*eval_data)
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
eval_cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in eval_labels]
return (texts, cats), (eval_texts, eval_cats) | [
"def",
"load_textcat_data",
"(",
"limit",
"=",
"0",
")",
":",
"# Partition off part of the train data for evaluation",
"train_data",
",",
"eval_data",
"=",
"thinc",
".",
"extra",
".",
"datasets",
".",
"imdb",
"(",
")",
"random",
".",
"shuffle",
"(",
"train_data",
")",
"train_data",
"=",
"train_data",
"[",
"-",
"limit",
":",
"]",
"texts",
",",
"labels",
"=",
"zip",
"(",
"*",
"train_data",
")",
"eval_texts",
",",
"eval_labels",
"=",
"zip",
"(",
"*",
"eval_data",
")",
"cats",
"=",
"[",
"{",
"\"POSITIVE\"",
":",
"bool",
"(",
"y",
")",
",",
"\"NEGATIVE\"",
":",
"not",
"bool",
"(",
"y",
")",
"}",
"for",
"y",
"in",
"labels",
"]",
"eval_cats",
"=",
"[",
"{",
"\"POSITIVE\"",
":",
"bool",
"(",
"y",
")",
",",
"\"NEGATIVE\"",
":",
"not",
"bool",
"(",
"y",
")",
"}",
"for",
"y",
"in",
"eval_labels",
"]",
"return",
"(",
"texts",
",",
"cats",
")",
",",
"(",
"eval_texts",
",",
"eval_cats",
")"
] | Load data from the IMDB dataset. | [
"Load",
"data",
"from",
"the",
"IMDB",
"dataset",
"."
] | python | train |
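The cats dictionaries built above always have the same two-key shape; a tiny standalone check of that conversion, without downloading IMDB:

    labels = (1, 0, 1)
    cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
    print(cats[0])  # {'POSITIVE': True, 'NEGATIVE': False}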
python-security/pyt | pyt/vulnerabilities/vulnerabilities.py | https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/vulnerabilities/vulnerabilities.py#L512-L549 | def find_vulnerabilities(
cfg_list,
blackbox_mapping_file,
sources_and_sinks_file,
interactive=False,
nosec_lines=defaultdict(set)
):
"""Find vulnerabilities in a list of CFGs from a trigger_word_file.
Args:
cfg_list(list[CFG]): the list of CFGs to scan.
blackbox_mapping_file(str)
sources_and_sinks_file(str)
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
Returns:
A list of vulnerabilities.
"""
vulnerabilities = list()
definitions = parse(sources_and_sinks_file)
with open(blackbox_mapping_file) as infile:
blackbox_mapping = json.load(infile)
for cfg in cfg_list:
find_vulnerabilities_in_cfg(
cfg,
definitions,
Lattice(cfg.nodes),
blackbox_mapping,
vulnerabilities,
interactive,
nosec_lines
)
if interactive:
with open(blackbox_mapping_file, 'w') as outfile:
json.dump(blackbox_mapping, outfile, indent=4)
return vulnerabilities | [
"def",
"find_vulnerabilities",
"(",
"cfg_list",
",",
"blackbox_mapping_file",
",",
"sources_and_sinks_file",
",",
"interactive",
"=",
"False",
",",
"nosec_lines",
"=",
"defaultdict",
"(",
"set",
")",
")",
":",
"vulnerabilities",
"=",
"list",
"(",
")",
"definitions",
"=",
"parse",
"(",
"sources_and_sinks_file",
")",
"with",
"open",
"(",
"blackbox_mapping_file",
")",
"as",
"infile",
":",
"blackbox_mapping",
"=",
"json",
".",
"load",
"(",
"infile",
")",
"for",
"cfg",
"in",
"cfg_list",
":",
"find_vulnerabilities_in_cfg",
"(",
"cfg",
",",
"definitions",
",",
"Lattice",
"(",
"cfg",
".",
"nodes",
")",
",",
"blackbox_mapping",
",",
"vulnerabilities",
",",
"interactive",
",",
"nosec_lines",
")",
"if",
"interactive",
":",
"with",
"open",
"(",
"blackbox_mapping_file",
",",
"'w'",
")",
"as",
"outfile",
":",
"json",
".",
"dump",
"(",
"blackbox_mapping",
",",
"outfile",
",",
"indent",
"=",
"4",
")",
"return",
"vulnerabilities"
] | Find vulnerabilities in a list of CFGs from a trigger_word_file.
Args:
cfg_list(list[CFG]): the list of CFGs to scan.
blackbox_mapping_file(str)
sources_and_sinks_file(str)
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
Returns:
A list of vulnerabilities. | [
"Find",
"vulnerabilities",
"in",
"a",
"list",
"of",
"CFGs",
"from",
"a",
"trigger_word_file",
"."
] | python | train |
ponty/EasyProcess | easyprocess/__init__.py | https://github.com/ponty/EasyProcess/blob/81c2923339e09a86b6a2b8c12dc960f1bc67db9c/easyprocess/__init__.py#L248-L272 | def wait(self, timeout=None):
"""Wait for command to complete.
Timeout:
- discussion: http://stackoverflow.com/questions/1191374/subprocess-with-timeout
- implementation: threading
:rtype: self
"""
if timeout is not None:
if not self._thread:
self._thread = threading.Thread(target=self._wait4process)
self._thread.daemon = 1
self._thread.start()
if self._thread:
self._thread.join(timeout=timeout)
self.timeout_happened = self.timeout_happened or self._thread.isAlive()
else:
# no timeout and no existing thread
self._wait4process()
return self | [
"def",
"wait",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"if",
"not",
"self",
".",
"_thread",
":",
"self",
".",
"_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_wait4process",
")",
"self",
".",
"_thread",
".",
"daemon",
"=",
"1",
"self",
".",
"_thread",
".",
"start",
"(",
")",
"if",
"self",
".",
"_thread",
":",
"self",
".",
"_thread",
".",
"join",
"(",
"timeout",
"=",
"timeout",
")",
"self",
".",
"timeout_happened",
"=",
"self",
".",
"timeout_happened",
"or",
"self",
".",
"_thread",
".",
"isAlive",
"(",
")",
"else",
":",
"# no timeout and no existing thread",
"self",
".",
"_wait4process",
"(",
")",
"return",
"self"
] | Wait for command to complete.
Timeout:
- discussion: http://stackoverflow.com/questions/1191374/subprocess-with-timeout
- implementation: threading
:rtype: self | [
"Wait",
"for",
"command",
"to",
"complete",
"."
] | python | train |
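A standalone sketch of the join-with-timeout pattern above; a sleep stands in for the real subprocess wait so the example runs anywhere:

    import threading
    import time

    def _wait4process():
        time.sleep(0.2)  # stands in for process.wait()

    t = threading.Thread(target=_wait4process)
    t.daemon = True
    t.start()
    t.join(timeout=0.05)
    timeout_happened = t.is_alive()  # is_alive() is the non-deprecated spelling
    print(timeout_happened)          # True: the worker outlived the timeout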
prompt-toolkit/pymux | pymux/server.py | https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/server.py#L115-L135 | def _run_command(self, packet):
"""
Execute a run command from the client.
"""
create_temp_cli = self.client_states is None
if create_temp_cli:
# If this client doesn't have a CLI. Create a Fake CLI where the
# window containing this pane, is the active one. (The CLI instance
# will be removed before the render function is called, so it doesn't
# hurt too much and makes the code easier.)
pane_id = int(packet['pane_id'])
self._create_app()
with set_app(self.client_state.app):
self.pymux.arrangement.set_active_window_from_pane_id(pane_id)
with set_app(self.client_state.app):
try:
self.pymux.handle_command(packet['data'])
finally:
self._close_connection() | [
"def",
"_run_command",
"(",
"self",
",",
"packet",
")",
":",
"create_temp_cli",
"=",
"self",
".",
"client_states",
"is",
"None",
"if",
"create_temp_cli",
":",
"# If this client doesn't have a CLI. Create a Fake CLI where the",
"# window containing this pane, is the active one. (The CLI instance",
"# will be removed before the render function is called, so it doesn't",
"# hurt too much and makes the code easier.)",
"pane_id",
"=",
"int",
"(",
"packet",
"[",
"'pane_id'",
"]",
")",
"self",
".",
"_create_app",
"(",
")",
"with",
"set_app",
"(",
"self",
".",
"client_state",
".",
"app",
")",
":",
"self",
".",
"pymux",
".",
"arrangement",
".",
"set_active_window_from_pane_id",
"(",
"pane_id",
")",
"with",
"set_app",
"(",
"self",
".",
"client_state",
".",
"app",
")",
":",
"try",
":",
"self",
".",
"pymux",
".",
"handle_command",
"(",
"packet",
"[",
"'data'",
"]",
")",
"finally",
":",
"self",
".",
"_close_connection",
"(",
")"
] | Execute a run command from the client. | [
"Execute",
"a",
"run",
"command",
"from",
"the",
"client",
"."
] | python | train |
klmitch/framer | framer/transport.py | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L307-L342 | def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default) | [
"def",
"get_extra_info",
"(",
"self",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"# Handle data we know about",
"if",
"name",
"in",
"self",
".",
"_handlers",
":",
"return",
"self",
".",
"_handlers",
"[",
"name",
"]",
"(",
"self",
")",
"# Call get_extra_info() on the transport",
"return",
"self",
".",
"_transport",
".",
"get_extra_info",
"(",
"name",
",",
"default",
"=",
"default",
")"
] | Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data. | [
"Called",
"by",
"the",
"client",
"protocol",
"to",
"return",
"optional",
"transport",
"information",
".",
"Information",
"requests",
"not",
"recognized",
"by",
"the",
"FramerProtocol",
"are",
"passed",
"on",
"to",
"the",
"underlying",
"transport",
"."
] | python | train |
tjcsl/cslbot | cslbot/helpers/core.py | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/core.py#L183-L207 | def reload_handler(self, c, e):
"""This handles reloads."""
cmd = self.is_reload(e)
cmdchar = self.config['core']['cmdchar']
if cmd is not None:
# If we're in a minimal reload state, only the owner can do stuff, as we can't rely on the db working.
if self.reload_event.is_set():
admins = [self.config['auth']['owner']]
else:
with self.handler.db.session_scope() as session:
admins = [x.nick for x in session.query(orm.Permissions).all()]
if e.source.nick not in admins:
c.privmsg(self.get_target(e), "Nope, not gonna do it.")
return
importlib.reload(reloader)
self.reload_event.set()
cmdargs = cmd[len('%sreload' % cmdchar) + 1:]
try:
if reloader.do_reload(self, e, cmdargs):
if self.config.getboolean('feature', 'server'):
self.server = server.init_server(self)
self.reload_event.clear()
logging.info("Successfully reloaded")
except Exception as ex:
backtrace.handle_traceback(ex, c, self.get_target(e), self.config) | [
"def",
"reload_handler",
"(",
"self",
",",
"c",
",",
"e",
")",
":",
"cmd",
"=",
"self",
".",
"is_reload",
"(",
"e",
")",
"cmdchar",
"=",
"self",
".",
"config",
"[",
"'core'",
"]",
"[",
"'cmdchar'",
"]",
"if",
"cmd",
"is",
"not",
"None",
":",
"# If we're in a minimal reload state, only the owner can do stuff, as we can't rely on the db working.",
"if",
"self",
".",
"reload_event",
".",
"set",
"(",
")",
":",
"admins",
"=",
"[",
"self",
".",
"config",
"[",
"'auth'",
"]",
"[",
"'owner'",
"]",
"]",
"else",
":",
"with",
"self",
".",
"handler",
".",
"db",
".",
"session_scope",
"(",
")",
"as",
"session",
":",
"admins",
"=",
"[",
"x",
".",
"nick",
"for",
"x",
"in",
"session",
".",
"query",
"(",
"orm",
".",
"Permissions",
")",
".",
"all",
"(",
")",
"]",
"if",
"e",
".",
"source",
".",
"nick",
"not",
"in",
"admins",
":",
"c",
".",
"privmsg",
"(",
"self",
".",
"get_target",
"(",
"e",
")",
",",
"\"Nope, not gonna do it.\"",
")",
"return",
"importlib",
".",
"reload",
"(",
"reloader",
")",
"self",
".",
"reload_event",
".",
"set",
"(",
")",
"cmdargs",
"=",
"cmd",
"[",
"len",
"(",
"'%sreload'",
"%",
"cmdchar",
")",
"+",
"1",
":",
"]",
"try",
":",
"if",
"reloader",
".",
"do_reload",
"(",
"self",
",",
"e",
",",
"cmdargs",
")",
":",
"if",
"self",
".",
"config",
".",
"getboolean",
"(",
"'feature'",
",",
"'server'",
")",
":",
"self",
".",
"server",
"=",
"server",
".",
"init_server",
"(",
"self",
")",
"self",
".",
"reload_event",
".",
"clear",
"(",
")",
"logging",
".",
"info",
"(",
"\"Successfully reloaded\"",
")",
"except",
"Exception",
"as",
"ex",
":",
"backtrace",
".",
"handle_traceback",
"(",
"ex",
",",
"c",
",",
"self",
".",
"get_target",
"(",
"e",
")",
",",
"self",
".",
"config",
")"
] | This handles reloads. | [
"This",
"handles",
"reloads",
"."
] | python | train |
numenta/nupic | src/nupic/swarming/permutations_runner.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L308-L341 | def runWithPermutationsScript(permutationsFilePath, options,
outputLabel, permWorkDir):
"""
Starts a swarm, given a path to a permutations.py script.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param permutationsFilePath {string} Path to permutations.py.
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {object} Model parameters.
"""
global g_currentVerbosityLevel
if "verbosityCount" in options:
g_currentVerbosityLevel = options["verbosityCount"]
del options["verbosityCount"]
else:
g_currentVerbosityLevel = 1
_setupInterruptHandling()
options["permutationsScriptPath"] = permutationsFilePath
options["outputLabel"] = outputLabel
options["outDir"] = permWorkDir
options["permWorkDir"] = permWorkDir
# Assume it's a permutations python script
runOptions = _injectDefaultOptions(options)
_validateOptions(runOptions)
return _runAction(runOptions) | [
"def",
"runWithPermutationsScript",
"(",
"permutationsFilePath",
",",
"options",
",",
"outputLabel",
",",
"permWorkDir",
")",
":",
"global",
"g_currentVerbosityLevel",
"if",
"\"verbosityCount\"",
"in",
"options",
":",
"g_currentVerbosityLevel",
"=",
"options",
"[",
"\"verbosityCount\"",
"]",
"del",
"options",
"[",
"\"verbosityCount\"",
"]",
"else",
":",
"g_currentVerbosityLevel",
"=",
"1",
"_setupInterruptHandling",
"(",
")",
"options",
"[",
"\"permutationsScriptPath\"",
"]",
"=",
"permutationsFilePath",
"options",
"[",
"\"outputLabel\"",
"]",
"=",
"outputLabel",
"options",
"[",
"\"outDir\"",
"]",
"=",
"permWorkDir",
"options",
"[",
"\"permWorkDir\"",
"]",
"=",
"permWorkDir",
"# Assume it's a permutations python script",
"runOptions",
"=",
"_injectDefaultOptions",
"(",
"options",
")",
"_validateOptions",
"(",
"runOptions",
")",
"return",
"_runAction",
"(",
"runOptions",
")"
] | Starts a swarm, given a path to a permutations.py script.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param permutationsFilePath {string} Path to permutations.py.
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {object} Model parameters. | [
"Starts",
"a",
"swarm",
"given",
"a",
"path",
"to",
"a",
"permutations",
".",
"py",
"script",
"."
] | python | valid |
briancappello/py-yaml-fixtures | py_yaml_fixtures/fixtures_loader.py | https://github.com/briancappello/py-yaml-fixtures/blob/60c37daf58ec3b1c4bba637889949523a69b8a73/py_yaml_fixtures/fixtures_loader.py#L152-L164 | def _load_from_yaml(self, filename: str, model_identifiers: Dict[str, List[str]]):
"""
Load fixtures from the given filename
"""
class_name = filename[:filename.rfind('.')]
rendered_yaml = self.env.get_template(filename).render(
model_identifiers=model_identifiers)
fixture_data, self.relationships[class_name] = self._post_process_yaml_data(
yaml.load(rendered_yaml),
self.factory.get_relationships(class_name))
for identifier_key, data in fixture_data.items():
self.model_fixtures[class_name][identifier_key] = data | [
"def",
"_load_from_yaml",
"(",
"self",
",",
"filename",
":",
"str",
",",
"model_identifiers",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
")",
":",
"class_name",
"=",
"filename",
"[",
":",
"filename",
".",
"rfind",
"(",
"'.'",
")",
"]",
"rendered_yaml",
"=",
"self",
".",
"env",
".",
"get_template",
"(",
"filename",
")",
".",
"render",
"(",
"model_identifiers",
"=",
"model_identifiers",
")",
"fixture_data",
",",
"self",
".",
"relationships",
"[",
"class_name",
"]",
"=",
"self",
".",
"_post_process_yaml_data",
"(",
"yaml",
".",
"load",
"(",
"rendered_yaml",
")",
",",
"self",
".",
"factory",
".",
"get_relationships",
"(",
"class_name",
")",
")",
"for",
"identifier_key",
",",
"data",
"in",
"fixture_data",
".",
"items",
"(",
")",
":",
"self",
".",
"model_fixtures",
"[",
"class_name",
"]",
"[",
"identifier_key",
"]",
"=",
"data"
] | Load fixtures from the given filename | [
"Load",
"fixtures",
"from",
"the",
"given",
"filename"
] | python | train |
saltstack/salt | salt/modules/mac_timezone.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_timezone.py#L31-L58 | def _get_date_time_format(dt_string):
'''
Function that detects the date/time format for the string passed.
:param str dt_string:
A date/time string
:return: The format of the passed dt_string
:rtype: str
:raises: SaltInvocationError on Invalid Date/Time string
'''
valid_formats = [
'%H:%M',
'%H:%M:%S',
'%m:%d:%y',
'%m:%d:%Y',
'%m/%d/%y',
'%m/%d/%Y'
]
for dt_format in valid_formats:
try:
datetime.strptime(dt_string, dt_format)
return dt_format
except ValueError:
continue
msg = 'Invalid Date/Time Format: {0}'.format(dt_string)
raise SaltInvocationError(msg) | [
"def",
"_get_date_time_format",
"(",
"dt_string",
")",
":",
"valid_formats",
"=",
"[",
"'%H:%M'",
",",
"'%H:%M:%S'",
",",
"'%m:%d:%y'",
",",
"'%m:%d:%Y'",
",",
"'%m/%d/%y'",
",",
"'%m/%d/%Y'",
"]",
"for",
"dt_format",
"in",
"valid_formats",
":",
"try",
":",
"datetime",
".",
"strptime",
"(",
"dt_string",
",",
"dt_format",
")",
"return",
"dt_format",
"except",
"ValueError",
":",
"continue",
"msg",
"=",
"'Invalid Date/Time Format: {0}'",
".",
"format",
"(",
"dt_string",
")",
"raise",
"SaltInvocationError",
"(",
"msg",
")"
] | Function that detects the date/time format for the string passed.
:param str dt_string:
A date/time string
:return: The format of the passed dt_string
:rtype: str
:raises: SaltInvocationError on Invalid Date/Time string | [
"Function",
"that",
"detects",
"the",
"date",
"/",
"time",
"format",
"for",
"the",
"string",
"passed",
"."
] | python | train |
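The probe loop in the record above is easy to exercise outside of Salt. The sketch below keeps the same logic but substitutes a plain ValueError for SaltInvocationError so it runs standalone; that substitution is the only assumption made here.

    from datetime import datetime

    VALID_FORMATS = ['%H:%M', '%H:%M:%S', '%m:%d:%y', '%m:%d:%Y', '%m/%d/%y', '%m/%d/%Y']

    def get_date_time_format(dt_string):
        for dt_format in VALID_FORMATS:
            try:
                datetime.strptime(dt_string, dt_format)  # raises ValueError on mismatch
                return dt_format
            except ValueError:
                continue
        raise ValueError('Invalid Date/Time Format: {0}'.format(dt_string))

    print(get_date_time_format('13:30'))       # %H:%M
    print(get_date_time_format('07/04/2019'))  # %m/%d/%Y

strptime requires the whole string to be consumed, which is why '%m/%d/%y' rejects the four-digit year above ('19' is left unconverted) instead of matching its first two digits.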
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L86-L100 | def _add_supplemental_bams(data):
"""Add supplemental files produced by alignment, useful for structural
variant calling.
"""
file_key = "work_bam"
if data.get(file_key):
for supext in ["disc", "sr"]:
base, ext = os.path.splitext(data[file_key])
test_file = "%s-%s%s" % (base, supext, ext)
if os.path.exists(test_file):
sup_key = file_key + "_plus"
if sup_key not in data:
data[sup_key] = {}
data[sup_key][supext] = test_file
return data | [
"def",
"_add_supplemental_bams",
"(",
"data",
")",
":",
"file_key",
"=",
"\"work_bam\"",
"if",
"data",
".",
"get",
"(",
"file_key",
")",
":",
"for",
"supext",
"in",
"[",
"\"disc\"",
",",
"\"sr\"",
"]",
":",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"data",
"[",
"file_key",
"]",
")",
"test_file",
"=",
"\"%s-%s%s\"",
"%",
"(",
"base",
",",
"supext",
",",
"ext",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"test_file",
")",
":",
"sup_key",
"=",
"file_key",
"+",
"\"_plus\"",
"if",
"sup_key",
"not",
"in",
"data",
":",
"data",
"[",
"sup_key",
"]",
"=",
"{",
"}",
"data",
"[",
"sup_key",
"]",
"[",
"supext",
"]",
"=",
"test_file",
"return",
"data"
] | Add supplemental files produced by alignment, useful for structural
variant calling. | [
"Add",
"supplemental",
"files",
"produced",
"by",
"alignment",
"useful",
"for",
"structural",
"variant",
"calling",
"."
] | python | train |
kylejusticemagnuson/pyti | pyti/volume_index.py | https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/volume_index.py#L35-L54 | def negative_volume_index(close_data, volume):
"""
Negative Volume Index (NVI).
Formula:
NVI0 = 1
IF Vt < Vt-1
NVIt = NVIt-1 + ((CLOSEt - CLOSEt-1) / CLOSEt-1) * NVIt-1
ELSE:
NVIt = NVIt-1
"""
catch_errors.check_for_input_len_diff(close_data, volume)
nvi = np.zeros(len(volume))
nvi[0] = 1
for idx in range(1, len(volume)):
if volume[idx] < volume[idx-1]:
nvi[idx] = volume_index_helper(nvi, idx, close_data)
else:
nvi[idx] = nvi[idx-1]
return nvi | [
"def",
"negative_volume_index",
"(",
"close_data",
",",
"volume",
")",
":",
"catch_errors",
".",
"check_for_input_len_diff",
"(",
"close_data",
",",
"volume",
")",
"nvi",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"volume",
")",
")",
"nvi",
"[",
"0",
"]",
"=",
"1",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"volume",
")",
")",
":",
"if",
"volume",
"[",
"idx",
"]",
"<",
"volume",
"[",
"idx",
"-",
"1",
"]",
":",
"nvi",
"[",
"idx",
"]",
"=",
"volume_index_helper",
"(",
"nvi",
",",
"idx",
",",
"close_data",
")",
"else",
":",
"nvi",
"[",
"idx",
"]",
"=",
"nvi",
"[",
"idx",
"-",
"1",
"]",
"return",
"nvi"
] | Negative Volume Index (NVI).
Formula:
NVI0 = 1
IF Vt < Vt-1
NVIt = NVIt-1 + ((CLOSEt - CLOSEt-1) / CLOSEt-1) * NVIt-1
ELSE:
NVIt = NVIt-1 | [
"Negative",
"Volume",
"Index",
"(",
"NVI",
")",
"."
] | python | train |
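Because volume_index_helper is defined elsewhere in that module and not shown in this record, here is a dependency-free sketch that inlines the docstring's recurrence; the sample closes and volumes are made up for illustration.

    def nvi(close, volume):
        # NVI_0 = 1; on a volume decrease,
        # NVI_t = NVI_{t-1} + (close_t - close_{t-1}) / close_{t-1} * NVI_{t-1}
        out = [1.0]
        for t in range(1, len(volume)):
            if volume[t] < volume[t - 1]:
                prev = out[-1]
                out.append(prev + (close[t] - close[t - 1]) / close[t - 1] * prev)
            else:
                out.append(out[-1])
        return out

    print(nvi([10.0, 11.0, 12.1], [500, 400, 450]))
    # [1.0, 1.1, 1.1]: volume fell at t=1 (close up 10%), rose at t=2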
gem/oq-engine | openquake/hazardlib/gsim/abrahamson_silva_2008.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_silva_2008.py#L327-L334 | def _compute_sigma_b(self, C, mag, vs30measured):
"""
Equation 23, page 81.
"""
sigma_0 = self._compute_sigma_0(C, mag, vs30measured)
sigma_amp = self.CONSTS['sigma_amp']
return np.sqrt(sigma_0 ** 2 - sigma_amp ** 2) | [
"def",
"_compute_sigma_b",
"(",
"self",
",",
"C",
",",
"mag",
",",
"vs30measured",
")",
":",
"sigma_0",
"=",
"self",
".",
"_compute_sigma_0",
"(",
"C",
",",
"mag",
",",
"vs30measured",
")",
"sigma_amp",
"=",
"self",
".",
"CONSTS",
"[",
"'sigma_amp'",
"]",
"return",
"np",
".",
"sqrt",
"(",
"sigma_0",
"**",
"2",
"-",
"sigma_amp",
"**",
"2",
")"
] | Equation 23, page 81. | [
"Equation",
"23",
"page",
"81",
"."
] | python | train |
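A quick numeric check of Equation 23 above, sigma_b = sqrt(sigma_0**2 - sigma_amp**2). The 0.3 used for sigma_amp below is illustrative; in the real class the value comes from self.CONSTS['sigma_amp'], which is not shown in this record.

    import numpy as np

    sigma_0 = np.array([0.7, 0.6])  # example sigma_0 values
    sigma_amp = 0.3                 # assumed constant for this sketch
    print(np.sqrt(sigma_0 ** 2 - sigma_amp ** 2))  # [0.63245553 0.51961524]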
tensorflow/probability | tensorflow_probability/python/mcmc/diagnostic.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L388-L393 | def _axis_size(x, axis=None):
"""Get number of elements of `x` in `axis`, as type `x.dtype`."""
if axis is None:
return tf.cast(tf.size(input=x), x.dtype)
return tf.cast(
tf.reduce_prod(input_tensor=tf.gather(tf.shape(input=x), axis)), x.dtype) | [
"def",
"_axis_size",
"(",
"x",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"return",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"x",
")",
",",
"x",
".",
"dtype",
")",
"return",
"tf",
".",
"cast",
"(",
"tf",
".",
"reduce_prod",
"(",
"input_tensor",
"=",
"tf",
".",
"gather",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"axis",
")",
")",
",",
"x",
".",
"dtype",
")"
] | Get number of elements of `x` in `axis`, as type `x.dtype`. | [
"Get",
"number",
"of",
"elements",
"of",
"x",
"in",
"axis",
"as",
"type",
"x",
".",
"dtype",
"."
] | python | test |
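The helper above returns the element count along the selected axes, cast to the input's dtype. A NumPy analogue (an assumed equivalent, written here only to avoid the TensorFlow dependency) makes the behaviour easy to check:

    import numpy as np

    def axis_size(x, axis=None):
        if axis is None:
            return x.dtype.type(x.size)
        # product of the shape entries picked out by `axis`,
        # mirroring tf.gather followed by tf.reduce_prod
        return x.dtype.type(np.prod(np.take(x.shape, axis)))

    x = np.zeros((4, 3, 2))
    print(axis_size(x))               # 24.0
    print(axis_size(x, axis=[0, 2]))  # 8.0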
DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/bson/json_util.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/json_util.py#L723-L729 | def _parse_canonical_minkey(doc):
"""Decode a JSON MinKey to bson.min_key.MinKey."""
if doc['$minKey'] is not 1:
raise TypeError('$minKey value must be 1: %s' % (doc,))
if len(doc) != 1:
raise TypeError('Bad $minKey, extra field(s): %s' % (doc,))
return MinKey() | [
"def",
"_parse_canonical_minkey",
"(",
"doc",
")",
":",
"if",
"doc",
"[",
"'$minKey'",
"]",
"is",
"not",
"1",
":",
"raise",
"TypeError",
"(",
"'$minKey value must be 1: %s'",
"%",
"(",
"doc",
",",
")",
")",
"if",
"len",
"(",
"doc",
")",
"!=",
"1",
":",
"raise",
"TypeError",
"(",
"'Bad $minKey, extra field(s): %s'",
"%",
"(",
"doc",
",",
")",
")",
"return",
"MinKey",
"(",
")"
] | Decode a JSON MinKey to bson.min_key.MinKey. | [
"Decode",
"a",
"JSON",
"MinKey",
"to",
"bson",
".",
"min_key",
".",
"MinKey",
"."
] | python | train |
InQuest/python-sandboxapi | sandboxapi/fireeye.py | https://github.com/InQuest/python-sandboxapi/blob/9bad73f453e25d7d23e7b4b1ae927f44a35a5bc3/sandboxapi/fireeye.py#L31-L77 | def _request(self, uri, method='GET', params=None, files=None, headers=None, auth=None):
"""Override the parent _request method.
We have to do this here because FireEye requires some extra
authentication steps. On each request we pass the auth headers, and
if the session has expired, we automatically reauthenticate.
"""
if headers:
headers['Accept'] = 'application/json'
else:
headers = {
'Accept': 'application/json',
}
if not self.api_token:
# need to log in
response = sandboxapi.SandboxAPI._request(self, '/auth/login', 'POST', headers=headers,
auth=HTTPBasicAuth(self.username, self.password))
if response.status_code != 200:
raise sandboxapi.SandboxError("Can't log in, HTTP Error {e}".format(e=response.status_code))
# we are now logged in, save the token
self.api_token = response.headers.get('X-FeApi-Token')
headers['X-FeApi-Token'] = self.api_token
response = sandboxapi.SandboxAPI._request(self, uri, method, params, files, headers)
# handle session timeout
unauthorized = False
try:
if json.loads(response.content.decode('utf-8'))['fireeyeapis']['httpStatus'] == 401:
unauthorized = True
except (ValueError, KeyError, TypeError):
# non-JSON response, or no such keys.
pass
if response.status_code == 401 or unauthorized:
self.api_token = None
try:
headers.pop('X-FeApi-Token')
except KeyError:
pass
# recurse
return self._request(uri, method, params, files, headers)
return response | [
"def",
"_request",
"(",
"self",
",",
"uri",
",",
"method",
"=",
"'GET'",
",",
"params",
"=",
"None",
",",
"files",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"auth",
"=",
"None",
")",
":",
"if",
"headers",
":",
"headers",
"[",
"'Accept'",
"]",
"=",
"'application/json'",
"else",
":",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/json'",
",",
"}",
"if",
"not",
"self",
".",
"api_token",
":",
"# need to log in",
"response",
"=",
"sandboxapi",
".",
"SandboxAPI",
".",
"_request",
"(",
"self",
",",
"'/auth/login'",
",",
"'POST'",
",",
"headers",
"=",
"headers",
",",
"auth",
"=",
"HTTPBasicAuth",
"(",
"self",
".",
"username",
",",
"self",
".",
"password",
")",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"sandboxapi",
".",
"SandboxError",
"(",
"\"Can't log in, HTTP Error {e}\"",
".",
"format",
"(",
"e",
"=",
"response",
".",
"status_code",
")",
")",
"# we are now logged in, save the token",
"self",
".",
"api_token",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"'X-FeApi-Token'",
")",
"headers",
"[",
"'X-FeApi-Token'",
"]",
"=",
"self",
".",
"api_token",
"response",
"=",
"sandboxapi",
".",
"SandboxAPI",
".",
"_request",
"(",
"self",
",",
"uri",
",",
"method",
",",
"params",
",",
"files",
",",
"headers",
")",
"# handle session timeout",
"unauthorized",
"=",
"False",
"try",
":",
"if",
"json",
".",
"loads",
"(",
"response",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"[",
"'fireeyeapis'",
"]",
"[",
"'httpStatus'",
"]",
"==",
"401",
":",
"unauthorized",
"=",
"True",
"except",
"(",
"ValueError",
",",
"KeyError",
",",
"TypeError",
")",
":",
"# non-JSON response, or no such keys.",
"pass",
"if",
"response",
".",
"status_code",
"==",
"401",
"or",
"unauthorized",
":",
"self",
".",
"api_token",
"=",
"None",
"try",
":",
"headers",
".",
"pop",
"(",
"'X-FeApi-Token'",
")",
"except",
"KeyError",
":",
"pass",
"# recurse",
"return",
"self",
".",
"_request",
"(",
"uri",
",",
"method",
",",
"params",
",",
"files",
",",
"headers",
")",
"return",
"response"
] | Override the parent _request method.
We have to do this here because FireEye requires some extra
authentication steps. On each request we pass the auth headers, and
if the session has expired, we automatically reauthenticate. | [
"Override",
"the",
"parent",
"_request",
"method",
"."
] | python | train |
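Stripped of the FireEye specifics, the record above is a lazy-login / retry-on-401 pattern: authenticate on first use, and on an unauthorized response drop the token and recurse so the next attempt logs in again. A minimal sketch follows; the backend object and its login()/call() methods are invented purely for illustration.

    class Session:
        def __init__(self, backend):
            self.backend = backend
            self.token = None

        def request(self, uri):
            if self.token is None:
                self.token = self.backend.login()      # first use: authenticate
            status, body = self.backend.call(uri, token=self.token)
            if status == 401:                          # session expired
                self.token = None
                return self.request(uri)               # retry with a fresh login
            return body

Like the original, this recurses on every 401, so a backend that keeps rejecting fresh tokens would loop; production code usually bounds the retries.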
keon/algorithms | algorithms/sort/counting_sort.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/counting_sort.py#L1-L36 | def counting_sort(arr):
"""
Counting_sort
Sorting an array which has no element greater than k
Creating a new temp_arr, where temp_arr[i] contains the number of
elements less than or equal to i in the arr
Then placing the number i into a correct position in the result_arr
return the result_arr
Complexity: O(n + k)
"""
m = min(arr)
# in case there are negative elements, change the array to all positive elements
different = 0
if m < 0:
# save the change, so that we can convert the array back to all positive numbers
different = -m
for i in range(len(arr)):
arr[i] += -m
k = max(arr)
temp_arr = [0] * (k + 1)
for i in range(0, len(arr)):
temp_arr[arr[i]] = temp_arr[arr[i]] + 1
# temp_arr[i] contains the number of times the value i appears in arr
for i in range(1, k + 1):
temp_arr[i] = temp_arr[i] + temp_arr[i - 1]
# temp_arr[i] contains the number of elements less than or equal to i in arr
result_arr = arr.copy()
# create result_arr and put each element in its correct position
for i in range(len(arr) - 1, -1, -1):
result_arr[temp_arr[arr[i]] - 1] = arr[i] - different
temp_arr[arr[i]] = temp_arr[arr[i]] - 1
return result_arr | [
"def",
"counting_sort",
"(",
"arr",
")",
":",
"m",
"=",
"min",
"(",
"arr",
")",
"# in case there are negative elements, change the array to all positive element",
"different",
"=",
"0",
"if",
"m",
"<",
"0",
":",
"# save the change, so that we can convert the array back to all positive number",
"different",
"=",
"-",
"m",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"arr",
")",
")",
":",
"arr",
"[",
"i",
"]",
"+=",
"-",
"m",
"k",
"=",
"max",
"(",
"arr",
")",
"temp_arr",
"=",
"[",
"0",
"]",
"*",
"(",
"k",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"arr",
")",
")",
":",
"temp_arr",
"[",
"arr",
"[",
"i",
"]",
"]",
"=",
"temp_arr",
"[",
"arr",
"[",
"i",
"]",
"]",
"+",
"1",
"# temp_array[i] contain the times the number i appear in arr",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"k",
"+",
"1",
")",
":",
"temp_arr",
"[",
"i",
"]",
"=",
"temp_arr",
"[",
"i",
"]",
"+",
"temp_arr",
"[",
"i",
"-",
"1",
"]",
"# temp_array[i] contain the number of element less than or equal i in arr",
"result_arr",
"=",
"arr",
".",
"copy",
"(",
")",
"# creating a result_arr an put the element in a correct positon",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"arr",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"result_arr",
"[",
"temp_arr",
"[",
"arr",
"[",
"i",
"]",
"]",
"-",
"1",
"]",
"=",
"arr",
"[",
"i",
"]",
"-",
"different",
"temp_arr",
"[",
"arr",
"[",
"i",
"]",
"]",
"=",
"temp_arr",
"[",
"arr",
"[",
"i",
"]",
"]",
"-",
"1",
"return",
"result_arr"
] | Counting_sort
Sorting an array which has no element greater than k
Creating a new temp_arr, where temp_arr[i] contains the number of
elements less than or equal to i in the arr
Then placing the number i into a correct position in the result_arr
return the result_arr
Complexity: O(n + k) | [
"Counting_sort",
"Sorting",
"a",
"array",
"which",
"has",
"no",
"element",
"greater",
"than",
"k",
"Creating",
"a",
"new",
"temp_arr",
"where",
"temp_arr",
"[",
"i",
"]",
"contain",
"the",
"number",
"of",
"element",
"less",
"than",
"or",
"equal",
"to",
"i",
"in",
"the",
"arr",
"Then",
"placing",
"the",
"number",
"i",
"into",
"a",
"correct",
"position",
"in",
"the",
"result_arr",
"return",
"the",
"result_arr",
"Complexity",
":",
"0",
"(",
"n",
")"
] | python | train |
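A usage sketch for the record above, assuming counting_sort is importable as defined. Note that the negative-value shift mutates the input list in place and is never undone:

    data = [4, -2, 7, 0, -2, 3]
    print(counting_sort(data))  # [-2, -2, 0, 3, 4, 7]
    print(data)                 # [6, 0, 9, 2, 0, 5]: shifted by +2, not restored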
tensorflow/datasets | tensorflow_datasets/text/xnli.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/xnli.py#L107-L123 | def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with tf.io.gfile.GFile(filepath) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
rows_per_pair_id[row['pairID']].append(row)
for rows in six.itervalues(rows_per_pair_id):
premise = {row['language']: row['sentence1'] for row in rows}
hypothesis = {row['language']: row['sentence2'] for row in rows}
yield {
'premise': premise,
'hypothesis': hypothesis,
'label': rows[0]['gold_label'],
} | [
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"rows_per_pair_id",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_NONE",
")",
"for",
"row",
"in",
"reader",
":",
"rows_per_pair_id",
"[",
"row",
"[",
"'pairID'",
"]",
"]",
".",
"append",
"(",
"row",
")",
"for",
"rows",
"in",
"six",
".",
"itervalues",
"(",
"rows_per_pair_id",
")",
":",
"premise",
"=",
"{",
"row",
"[",
"'language'",
"]",
":",
"row",
"[",
"'sentence1'",
"]",
"for",
"row",
"in",
"rows",
"}",
"hypothesis",
"=",
"{",
"row",
"[",
"'language'",
"]",
":",
"row",
"[",
"'sentence2'",
"]",
"for",
"row",
"in",
"rows",
"}",
"yield",
"{",
"'premise'",
":",
"premise",
",",
"'hypothesis'",
":",
"hypothesis",
",",
"'label'",
":",
"rows",
"[",
"0",
"]",
"[",
"'gold_label'",
"]",
",",
"}"
] | This function returns the examples in the raw (text) form. | [
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] | python | train |
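The grouping step above (collect all language rows sharing a pairID, then zip them into per-language dicts) can be reproduced on a toy TSV; the two-row snippet below is fabricated and much narrower than real XNLI files.

    import collections, csv, io

    tsv = ("pairID\tlanguage\tsentence1\tsentence2\tgold_label\n"
           "1\ten\tA man eats.\tA person eats.\tentailment\n"
           "1\tde\tEin Mann isst.\tEine Person isst.\tentailment\n")

    rows_per_pair_id = collections.defaultdict(list)
    for row in csv.DictReader(io.StringIO(tsv), delimiter='\t', quoting=csv.QUOTE_NONE):
        rows_per_pair_id[row['pairID']].append(row)

    for rows in rows_per_pair_id.values():
        example = {
            'premise': {r['language']: r['sentence1'] for r in rows},
            'hypothesis': {r['language']: r['sentence2'] for r in rows},
            'label': rows[0]['gold_label'],
        }
        print(example['premise'])  # {'en': 'A man eats.', 'de': 'Ein Mann isst.'}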
litl/rauth | rauth/oauth.py | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/oauth.py#L228-L248 | def sign(self, consumer_secret, access_token_secret, method, url,
oauth_params, req_kwargs):
'''Sign request using PLAINTEXT method.
:param consumer_secret: Consumer secret.
:type consumer_secret: str
:param access_token_secret: Access token secret (optional).
:type access_token_secret: str
:param method: Unused
:type method: str
:param url: Unused
:type url: str
:param oauth_params: Unused
:type oauth_params: dict
:param req_kwargs: Unused
:type req_kwargs: dict
'''
key = self._escape(consumer_secret) + b'&'
if access_token_secret:
key += self._escape(access_token_secret)
return key.decode() | [
"def",
"sign",
"(",
"self",
",",
"consumer_secret",
",",
"access_token_secret",
",",
"method",
",",
"url",
",",
"oauth_params",
",",
"req_kwargs",
")",
":",
"key",
"=",
"self",
".",
"_escape",
"(",
"consumer_secret",
")",
"+",
"b'&'",
"if",
"access_token_secret",
":",
"key",
"+=",
"self",
".",
"_escape",
"(",
"access_token_secret",
")",
"return",
"key",
".",
"decode",
"(",
")"
] | Sign request using PLAINTEXT method.
:param consumer_secret: Consumer secret.
:type consumer_secret: str
:param access_token_secret: Access token secret (optional).
:type access_token_secret: str
:param method: Unused
:type method: str
:param url: Unused
:type url: str
:param oauth_params: Unused
:type oauth_params: dict
:param req_kwargs: Unused
:type req_kwargs: dict | [
"Sign",
"request",
"using",
"PLAINTEXT",
"method",
"."
] | python | train |
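For reference, a PLAINTEXT "signature" is just the percent-encoded consumer secret and token secret joined by '&' (RFC 5849, section 3.4.4). The standalone sketch below uses urllib.parse.quote, which may differ from rauth's internal _escape on edge-case characters, so treat it as an approximation; the secret strings are arbitrary examples.

    from urllib.parse import quote

    def plaintext_signature(consumer_secret, access_token_secret=None):
        key = quote(consumer_secret, safe='~') + '&'  # token part may be empty
        if access_token_secret:
            key += quote(access_token_secret, safe='~')
        return key

    print(plaintext_signature('kd94hf93k423kf44', 'pfkkdhi9sl3r4s00'))
    # kd94hf93k423kf44&pfkkdhi9sl3r4s00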
heitzmann/gdspy | gdspy/__init__.py | https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L197-L219 | def rotate(self, angle, center=(0, 0)):
"""
Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : ``PolygonSet``
This object.
"""
ca = numpy.cos(angle)
sa = numpy.sin(angle)
sa = numpy.array((-sa, sa))
c0 = numpy.array(center)
self.polygons = [(points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
for points in self.polygons]
return self | [
"def",
"rotate",
"(",
"self",
",",
"angle",
",",
"center",
"=",
"(",
"0",
",",
"0",
")",
")",
":",
"ca",
"=",
"numpy",
".",
"cos",
"(",
"angle",
")",
"sa",
"=",
"numpy",
".",
"sin",
"(",
"angle",
")",
"sa",
"=",
"numpy",
".",
"array",
"(",
"(",
"-",
"sa",
",",
"sa",
")",
")",
"c0",
"=",
"numpy",
".",
"array",
"(",
"center",
")",
"self",
".",
"polygons",
"=",
"[",
"(",
"points",
"-",
"c0",
")",
"*",
"ca",
"+",
"(",
"points",
"-",
"c0",
")",
"[",
":",
",",
":",
":",
"-",
"1",
"]",
"*",
"sa",
"+",
"c0",
"for",
"points",
"in",
"self",
".",
"polygons",
"]",
"return",
"self"
] | Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : ``PolygonSet``
This object. | [
"Rotate",
"this",
"object",
"."
] | python | train |
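The [:, ::-1] trick in the record above vectorises the 2x2 rotation: each point contributes (x, y) * cos(angle) plus its swapped pair (y, x) scaled elementwise by (-sin, +sin), giving (x*cos - y*sin, y*cos + x*sin). A small NumPy check with points chosen for easy mental math:

    import numpy as np

    angle = np.pi / 2
    ca, s = np.cos(angle), np.sin(angle)
    sa = np.array((-s, s))
    points = np.array([[1.0, 0.0], [0.0, 2.0]])
    c0 = np.zeros(2)

    rotated = (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
    print(np.round(rotated, 6))
    # [[ 0.  1.]
    #  [-2.  0.]]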
RudolfCardinal/pythonlib | cardinal_pythonlib/rnc_db.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L937-L955 | def get_sql_insert_or_update(table: str,
fieldlist: Sequence[str],
delims: Tuple[str, str] = ("", "")) -> str:
"""Returns ?-marked SQL for an INSERT-or-if-duplicate-key-UPDATE statement.
"""
# http://stackoverflow.com/questions/4205181
return """
INSERT INTO {table} ({fields})
VALUES ({placeholders})
ON DUPLICATE KEY UPDATE {updatelist}
""".format(
table=delimit(table, delims),
fields=",".join([delimit(x, delims) for x in fieldlist]),
placeholders=",".join(["?"] * len(fieldlist)),
updatelist=",".join(
["{field}=VALUES({field})".format(field=delimit(x, delims))
for x in fieldlist]
),
) | [
"def",
"get_sql_insert_or_update",
"(",
"table",
":",
"str",
",",
"fieldlist",
":",
"Sequence",
"[",
"str",
"]",
",",
"delims",
":",
"Tuple",
"[",
"str",
",",
"str",
"]",
"=",
"(",
"\"\"",
",",
"\"\"",
")",
")",
"->",
"str",
":",
"# http://stackoverflow.com/questions/4205181",
"return",
"\"\"\"\n INSERT INTO {table} ({fields})\n VALUES ({placeholders})\n ON DUPLICATE KEY UPDATE {updatelist}\n \"\"\"",
".",
"format",
"(",
"table",
"=",
"delimit",
"(",
"table",
",",
"delims",
")",
",",
"fields",
"=",
"\",\"",
".",
"join",
"(",
"[",
"delimit",
"(",
"x",
",",
"delims",
")",
"for",
"x",
"in",
"fieldlist",
"]",
")",
",",
"placeholders",
"=",
"\",\"",
".",
"join",
"(",
"[",
"\"?\"",
"]",
"*",
"len",
"(",
"fieldlist",
")",
")",
",",
"updatelist",
"=",
"\",\"",
".",
"join",
"(",
"[",
"\"{field}=VALUES({field})\"",
".",
"format",
"(",
"field",
"=",
"delimit",
"(",
"x",
",",
"delims",
")",
")",
"for",
"x",
"in",
"fieldlist",
"]",
")",
",",
")"
] | Returns ?-marked SQL for an INSERT-or-if-duplicate-key-UPDATE statement. | [
"Returns",
"?",
"-",
"marked",
"SQL",
"for",
"an",
"INSERT",
"-",
"or",
"-",
"if",
"-",
"duplicate",
"-",
"key",
"-",
"UPDATE",
"statement",
"."
] | python | train |
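Calling the function above with MySQL backtick delimiters yields SQL along the following lines (whitespace trimmed; the table and column names are illustrative, and the module's delimit helper, not shown in this record, is assumed to simply wrap a name in the delimiter pair):

    sql = get_sql_insert_or_update('patient', ['id', 'surname'], delims=('`', '`'))
    print(sql)
    # INSERT INTO `patient` (`id`,`surname`)
    # VALUES (?,?)
    # ON DUPLICATE KEY UPDATE `id`=VALUES(`id`),`surname`=VALUES(`surname`)

ON DUPLICATE KEY UPDATE is MySQL-specific syntax; other engines need a different upsert form.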
sunshowers/ntfs | ntfsutils/hardlink.py | https://github.com/sunshowers/ntfs/blob/33388a514f0a5a032f68dfeb9a40ce7c772e7cf5/ntfsutils/hardlink.py#L25-L34 | def samefile(path1, path2):
"""
Returns True if path1 and path2 refer to the same file.
"""
# Check if both are on the same volume and have the same file ID
info1 = fs.getfileinfo(path1)
info2 = fs.getfileinfo(path2)
return (info1.dwVolumeSerialNumber == info2.dwVolumeSerialNumber and
info1.nFileIndexHigh == info2.nFileIndexHigh and
info1.nFileIndexLow == info2.nFileIndexLow) | [
"def",
"samefile",
"(",
"path1",
",",
"path2",
")",
":",
"# Check if both are on the same volume and have the same file ID",
"info1",
"=",
"fs",
".",
"getfileinfo",
"(",
"path1",
")",
"info2",
"=",
"fs",
".",
"getfileinfo",
"(",
"path2",
")",
"return",
"(",
"info1",
".",
"dwVolumeSerialNumber",
"==",
"info2",
".",
"dwVolumeSerialNumber",
"and",
"info1",
".",
"nFileIndexHigh",
"==",
"info2",
".",
"nFileIndexHigh",
"and",
"info1",
".",
"nFileIndexLow",
"==",
"info2",
".",
"nFileIndexLow",
")"
] | Returns True if path1 and path2 refer to the same file. | [
"Returns",
"True",
"if",
"path1",
"and",
"path2",
"refer",
"to",
"the",
"same",
"file",
"."
] | python | test |
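A usage sketch for the record above, assuming Windows with the package installed and an NTFS working directory; os.link creates the hard link so that only the record's samefile is exercised.

    import os
    from ntfsutils.hardlink import samefile  # the function shown above

    open('a.txt', 'w').close()
    os.link('a.txt', 'b.txt')          # NTFS hard link (Windows, suitable rights)
    print(samefile('a.txt', 'b.txt'))  # True: same volume serial and file index
    print(samefile('a.txt', 'a.txt'))  # True
    os.remove('b.txt'); os.remove('a.txt')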
codelv/enaml-native | src/enamlnative/android/android_fragment.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_fragment.py#L73-L81 | def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidFragment, self).init_widget()
f = self.fragment
f.setFragmentListener(f.getId())
f.onCreateView.connect(self.on_create_view)
f.onDestroyView.connect(self.on_destroy_view) | [
"def",
"init_widget",
"(",
"self",
")",
":",
"super",
"(",
"AndroidFragment",
",",
"self",
")",
".",
"init_widget",
"(",
")",
"f",
"=",
"self",
".",
"fragment",
"f",
".",
"setFragmentListener",
"(",
"f",
".",
"getId",
"(",
")",
")",
"f",
".",
"onCreateView",
".",
"connect",
"(",
"self",
".",
"on_create_view",
")",
"f",
".",
"onDestroyView",
".",
"connect",
"(",
"self",
".",
"on_destroy_view",
")"
] | Initialize the underlying widget. | [
"Initialize",
"the",
"underlying",
"widget",
"."
] | python | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/__init__.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/__init__.py#L17-L93 | def convert(model, input_names='input', target_name='target',
probability='classProbability', input_length='auto'):
"""
Convert a LIBSVM model to Core ML format.
Parameters
----------
model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
or string path to a saved model.
input_names: str | [str]
Name of the input column(s).
If a single string is used (the default) the input will be an array. The
length of the array will be inferred from the model, this can be overridden
using the 'input_length' parameter.
target: str
Name of the output column.
probability: str
Name of the output class probability column.
Only used for C-SVC and nu-SVC that have been trained with probability
estimates enabled.
input_length: int
Set the length of the input array.
This parameter should only be used when the input is an array (i.e. when
'input_name' is a string).
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a LIBSVM model
>>> import svmutil
>>> problem = svmutil.svm_problem([0,0,1,1], [[0,1], [1,1], [8,9], [7,7]])
>>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
# Convert using default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
# Save the CoreML model to a file.
>>> coreml_model.save('./my_model.mlmodel')
# Convert using user specified input names
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y'])
"""
if not(_HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
if isinstance(model, _string_types):
libsvm_model = _libsvm_util.load_model(model)
else:
libsvm_model = model
if not isinstance(libsvm_model, _libsvm.svm_model):
raise TypeError("Expected 'model' of type '%s' (got %s)" % (_libsvm.svm_model, type(libsvm_model)))
if not isinstance(target_name, _string_types):
raise TypeError("Expected 'target_name' of type str (got %s)" % type(libsvm_model))
if input_length != 'auto' and not isinstance(input_length, int):
raise TypeError("Expected 'input_length' of type int, got %s" % type(input_length))
if input_length != 'auto' and not isinstance(input_names, _string_types):
raise ValueError("'input_length' should not be used unless the input will be only one array.")
if not isinstance(probability, _string_types):
raise TypeError("Expected 'probability' of type str (got %s)" % type(probability))
return _libsvm_converter.convert(libsvm_model, input_names, target_name, input_length, probability) | [
"def",
"convert",
"(",
"model",
",",
"input_names",
"=",
"'input'",
",",
"target_name",
"=",
"'target'",
",",
"probability",
"=",
"'classProbability'",
",",
"input_length",
"=",
"'auto'",
")",
":",
"if",
"not",
"(",
"_HAS_LIBSVM",
")",
":",
"raise",
"RuntimeError",
"(",
"'libsvm not found. libsvm conversion API is disabled.'",
")",
"if",
"isinstance",
"(",
"model",
",",
"_string_types",
")",
":",
"libsvm_model",
"=",
"_libsvm_util",
".",
"load_model",
"(",
"model",
")",
"else",
":",
"libsvm_model",
"=",
"model",
"if",
"not",
"isinstance",
"(",
"libsvm_model",
",",
"_libsvm",
".",
"svm_model",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'model' of type '%s' (got %s)\"",
"%",
"(",
"_libsvm",
".",
"svm_model",
",",
"type",
"(",
"libsvm_model",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"target_name",
",",
"_string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'target_name' of type str (got %s)\"",
"%",
"type",
"(",
"libsvm_model",
")",
")",
"if",
"input_length",
"!=",
"'auto'",
"and",
"not",
"isinstance",
"(",
"input_length",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'input_length' of type int, got %s\"",
"%",
"type",
"(",
"input_length",
")",
")",
"if",
"input_length",
"!=",
"'auto'",
"and",
"not",
"isinstance",
"(",
"input_names",
",",
"_string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"'input_length' should not be used unless the input will be only one array.\"",
")",
"if",
"not",
"isinstance",
"(",
"probability",
",",
"_string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'probability' of type str (got %s)\"",
"%",
"type",
"(",
"probability",
")",
")",
"return",
"_libsvm_converter",
".",
"convert",
"(",
"libsvm_model",
",",
"input_names",
",",
"target_name",
",",
"input_length",
",",
"probability",
")"
] | Convert a LIBSVM model to Core ML format.
Parameters
----------
model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
or string path to a saved model.
input_names: str | [str]
Name of the input column(s).
If a single string is used (the default) the input will be an array. The
length of the array will be inferred from the model, this can be overridden
using the 'input_length' parameter.
target: str
Name of the output column.
probability: str
Name of the output class probability column.
Only used for C-SVC and nu-SVC that have been trained with probability
estimates enabled.
input_length: int
Set the length of the input array.
This parameter should only be used when the input is an array (i.e. when
'input_name' is a string).
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a LIBSVM model
>>> import svmutil
>>> problem = svmutil.svm_problem([0,0,1,1], [[0,1], [1,1], [8,9], [7,7]])
>>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
# Convert using default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
# Save the CoreML model to a file.
>>> coreml_model.save('./my_model.mlmodel')
# Convert using user specified input names
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y']) | [
"Convert",
"a",
"LIBSVM",
"model",
"to",
"Core",
"ML",
"format",
"."
] | python | train |
woolfson-group/isambard | isambard/ampal/non_canonical.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/non_canonical.py#L16-L66 | def convert_pro_to_hyp(pro):
"""Converts a pro residue to a hydroxypro residue.
All metadata associated with the original pro will be lost i.e. tags.
As a consequence, it is advisable to relabel all atoms in the structure
in order to make them contiguous.
Parameters
----------
pro: ampal.Residue
The proline residue to be mutated to hydroxyproline.
Examples
--------
We can create a collagen model using isambard and convert every third
residue to hydroxyproline:
>>> import isambard
>>> col = isambard.specifications.CoiledCoil.tropocollagen(aa=21)
>>> col.pack_new_sequences(['GPPGPPGPPGPPGPPGPPGPP']*3)
>>> to_convert = [
... res for (i, res) in enumerate(col.get_monomers())
... if not (i + 1) % 3]
>>> for pro in to_convert:
... isambard.ampal.non_canonical.convert_pro_to_hyp(pro)
>>> col.sequences
['GPXGPXGPXGPXGPXGPXGPX', 'GPXGPXGPXGPXGPXGPXGPX', 'GPXGPXGPXGPXGPXGPXGPX']
"""
with open(str(REF_PATH / 'hydroxyproline_ref_1bkv_0_6.pickle'), 'rb') as inf:
hyp_ref = pickle.load(inf)
align_nab(hyp_ref, pro)
to_remove = ['CB', 'CG', 'CD']
for (label, atom) in pro.atoms.items():
if atom.element == 'H':
to_remove.append(label)
for label in to_remove:
del pro.atoms[label]
for key, val in hyp_ref.atoms.items():
if key not in pro.atoms.keys():
pro.atoms[key] = val
pro.mol_code = 'HYP'
pro.mol_letter = 'X'
pro.is_hetero = True
pro.tags = {}
pro.states = {'A': pro.atoms}
pro.active_state = 'A'
for atom in pro.get_atoms():
atom.ampal_parent = pro
atom.tags = {'bfactor': 1.0, 'charge': ' ',
'occupancy': 1.0, 'state': 'A'}
return | [
"def",
"convert_pro_to_hyp",
"(",
"pro",
")",
":",
"with",
"open",
"(",
"str",
"(",
"REF_PATH",
"/",
"'hydroxyproline_ref_1bkv_0_6.pickle'",
")",
",",
"'rb'",
")",
"as",
"inf",
":",
"hyp_ref",
"=",
"pickle",
".",
"load",
"(",
"inf",
")",
"align_nab",
"(",
"hyp_ref",
",",
"pro",
")",
"to_remove",
"=",
"[",
"'CB'",
",",
"'CG'",
",",
"'CD'",
"]",
"for",
"(",
"label",
",",
"atom",
")",
"in",
"pro",
".",
"atoms",
".",
"items",
"(",
")",
":",
"if",
"atom",
".",
"element",
"==",
"'H'",
":",
"to_remove",
".",
"append",
"(",
"label",
")",
"for",
"label",
"in",
"to_remove",
":",
"del",
"pro",
".",
"atoms",
"[",
"label",
"]",
"for",
"key",
",",
"val",
"in",
"hyp_ref",
".",
"atoms",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"pro",
".",
"atoms",
".",
"keys",
"(",
")",
":",
"pro",
".",
"atoms",
"[",
"key",
"]",
"=",
"val",
"pro",
".",
"mol_code",
"=",
"'HYP'",
"pro",
".",
"mol_letter",
"=",
"'X'",
"pro",
".",
"is_hetero",
"=",
"True",
"pro",
".",
"tags",
"=",
"{",
"}",
"pro",
".",
"states",
"=",
"{",
"'A'",
":",
"pro",
".",
"atoms",
"}",
"pro",
".",
"active_state",
"=",
"'A'",
"for",
"atom",
"in",
"pro",
".",
"get_atoms",
"(",
")",
":",
"atom",
".",
"ampal_parent",
"=",
"pro",
"atom",
".",
"tags",
"=",
"{",
"'bfactor'",
":",
"1.0",
",",
"'charge'",
":",
"' '",
",",
"'occupancy'",
":",
"1.0",
",",
"'state'",
":",
"'A'",
"}",
"return"
] | Converts a pro residue to a hydroxypro residue.
All metadata associated with the original pro will be lost i.e. tags.
As a consequence, it is advisable to relabel all atoms in the structure
in order to make them contiguous.
Parameters
----------
pro: ampal.Residue
The proline residue to be mutated to hydroxyproline.
Examples
--------
We can create a collagen model using isambard and convert every third
residue to hydroxyproline:
>>> import isambard
>>> col = isambard.specifications.CoiledCoil.tropocollagen(aa=21)
>>> col.pack_new_sequences(['GPPGPPGPPGPPGPPGPPGPP']*3)
>>> to_convert = [
... res for (i, res) in enumerate(col.get_monomers())
... if not (i + 1) % 3]
>>> for pro in to_convert:
... isambard.ampal.non_canonical.convert_pro_to_hyp(pro)
>>> col.sequences
['GPXGPXGPXGPXGPXGPXGPX', 'GPXGPXGPXGPXGPXGPXGPX', 'GPXGPXGPXGPXGPXGPXGPX'] | [
"Converts",
"a",
"pro",
"residue",
"to",
"a",
"hydroxypro",
"residue",
"."
] | python | train |
thefab/tornadis | tornadis/utils.py | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/utils.py#L108-L120 | def _done_callback(self, wrapped):
"""Internal "done callback" to set the result of the object.
The result of the object is forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object
"""
if wrapped.exception():
self.set_exception(wrapped.exception())
else:
self.set_result(wrapped.result()) | [
"def",
"_done_callback",
"(",
"self",
",",
"wrapped",
")",
":",
"if",
"wrapped",
".",
"exception",
"(",
")",
":",
"self",
".",
"set_exception",
"(",
"wrapped",
".",
"exception",
"(",
")",
")",
"else",
":",
"self",
".",
"set_result",
"(",
"wrapped",
".",
"result",
"(",
")",
")"
] | Internal "done callback" to set the result of the object.
The result of the object is forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object | [
"Internal",
"done",
"callback",
"to",
"set",
"the",
"result",
"of",
"the",
"object",
"."
] | python | train |
FujiMakoto/IPS-Vagrant | ips_vagrant/downloaders/downloader.py | https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/downloaders/downloader.py#L97-L116 | def get(self, version, use_cache=True):
"""
Get the filepath to the specified version (downloading it in the process if necessary)
@type version: IpsMeta
@param use_cache: Use cached version downloads if available
@type use_cache: bool
@rtype: str
"""
self.log.info('Retrieving %s version %s', self.meta_name, version.version)
if version.filepath:
if use_cache:
return version.filepath
else:
self.log.info('Ignoring cached %s version: %s', self.meta_name, version.version)
elif not use_cache:
self.log.info("We can't ignore the cache of a version that hasn't been downloaded yet")
version.download()
return version.filepath | [
"def",
"get",
"(",
"self",
",",
"version",
",",
"use_cache",
"=",
"True",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Retrieving %s version %s'",
",",
"self",
".",
"meta_name",
",",
"version",
".",
"version",
")",
"if",
"version",
".",
"filepath",
":",
"if",
"use_cache",
":",
"return",
"version",
".",
"filepath",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Ignoring cached %s version: %s'",
",",
"self",
".",
"meta_name",
",",
"version",
".",
"version",
")",
"elif",
"not",
"use_cache",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"We can't ignore the cache of a version that hasn't been downloaded yet\"",
")",
"version",
".",
"download",
"(",
")",
"return",
"version",
".",
"filepath"
] | Get the filepath to the specified version (downloading it in the process if necessary)
@type version: IpsMeta
@param use_cache: Use cached version downloads if available
@type use_cache: bool
@rtype: str | [
"Get",
"the",
"filepath",
"to",
"the",
"specified",
"version",
"(",
"downloading",
"it",
"in",
"the",
"process",
"if",
"necessary",
")"
] | python | train |
hobson/aima | aima/search.py | https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/search.py#L613-L620 | def print_boggle(board):
"Print the board in a 2-d array."
n2 = len(board); n = exact_sqrt(n2)
for i in range(n2):
if i % n == 0 and i > 0: print
if board[i] == 'Q': print 'Qu',
else: print str(board[i]) + ' ',
print | [
"def",
"print_boggle",
"(",
"board",
")",
":",
"n2",
"=",
"len",
"(",
"board",
")",
"n",
"=",
"exact_sqrt",
"(",
"n2",
")",
"for",
"i",
"in",
"range",
"(",
"n2",
")",
":",
"if",
"i",
"%",
"n",
"==",
"0",
"and",
"i",
">",
"0",
":",
"print",
"if",
"board",
"[",
"i",
"]",
"==",
"'Q'",
":",
"print",
"'Qu'",
",",
"else",
":",
"print",
"str",
"(",
"board",
"[",
"i",
"]",
")",
"+",
"' '",
",",
"print"
] | Print the board in a 2-d array. | [
"Print",
"the",
"board",
"in",
"a",
"2",
"-",
"d",
"array",
"."
] | python | valid |
sammchardy/python-binance | binance/client.py | https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/client.py#L1565-L1594 | def get_asset_balance(self, asset, **params):
"""Get current asset balance.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data
:param asset: required
:type asset: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: dictionary or None if not found
.. code-block:: python
{
"asset": "BTC",
"free": "4723846.89208129",
"locked": "0.00000000"
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self.get_account(**params)
# find asset balance in list of balances
if "balances" in res:
for bal in res['balances']:
if bal['asset'].lower() == asset.lower():
return bal
return None | [
"def",
"get_asset_balance",
"(",
"self",
",",
"asset",
",",
"*",
"*",
"params",
")",
":",
"res",
"=",
"self",
".",
"get_account",
"(",
"*",
"*",
"params",
")",
"# find asset balance in list of balances",
"if",
"\"balances\"",
"in",
"res",
":",
"for",
"bal",
"in",
"res",
"[",
"'balances'",
"]",
":",
"if",
"bal",
"[",
"'asset'",
"]",
".",
"lower",
"(",
")",
"==",
"asset",
".",
"lower",
"(",
")",
":",
"return",
"bal",
"return",
"None"
] | Get current asset balance.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data
:param asset: required
:type asset: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: dictionary or None if not found
.. code-block:: python
{
"asset": "BTC",
"free": "4723846.89208129",
"locked": "0.00000000"
}
:raises: BinanceRequestException, BinanceAPIException | [
"Get",
"current",
"asset",
"balance",
"."
] | python | train |
cltk/cltk | cltk/prosody/latin/syllabifier.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/syllabifier.py#L306-L315 | def _find_consonant_cluster(self, letters: List[str]) -> List[int]:
"""
Find clusters of consonants that do not contain a vowel.
:param letters:
:return:
"""
for idx, letter_group in enumerate(letters):
if self._contains_consonants(letter_group) and not self._contains_vowels(letter_group):
return [idx]
return [] | [
"def",
"_find_consonant_cluster",
"(",
"self",
",",
"letters",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"int",
"]",
":",
"for",
"idx",
",",
"letter_group",
"in",
"enumerate",
"(",
"letters",
")",
":",
"if",
"self",
".",
"_contains_consonants",
"(",
"letter_group",
")",
"and",
"not",
"self",
".",
"_contains_vowels",
"(",
"letter_group",
")",
":",
"return",
"[",
"idx",
"]",
"return",
"[",
"]"
] | Find clusters of consonants that do not contain a vowel.
:param letters:
:return: | [
"Find",
"clusters",
"of",
"consonants",
"that",
"do",
"not",
"contain",
"a",
"vowel",
".",
":",
"param",
"letters",
":",
":",
"return",
":"
] | python | train |
spyder-ide/spyder | spyder/plugins/editor/panels/indentationguides.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/indentationguides.py#L32-L80 | def paintEvent(self, event):
"""Override Qt method."""
painter = QPainter(self)
color = QColor(self.color)
color.setAlphaF(.5)
painter.setPen(color)
offset = self.editor.document().documentMargin() + \
self.editor.contentOffset().x()
for _, line_number, block in self.editor.visible_blocks:
indentation = TextBlockHelper.get_fold_lvl(block)
ref_lvl = indentation
block = block.next()
last_line = block.blockNumber()
lvl = TextBlockHelper.get_fold_lvl(block)
if ref_lvl == lvl: # for zones set programmatically, such as imports
# in pyqode.python
ref_lvl -= 1
while (block.isValid() and
TextBlockHelper.get_fold_lvl(block) > ref_lvl):
last_line = block.blockNumber()
block = block.next()
end_of_sub_fold = block
if last_line:
block = block.document().findBlockByNumber(last_line)
while ((block.blockNumber()) and (block.text().strip() == ''
or block.text().strip().startswith('#'))):
block = block.previous()
last_line = block.blockNumber()
block = self.editor.document().findBlockByNumber(line_number)
top = int(self.editor.blockBoundingGeometry(block).translated(
self.editor.contentOffset()).top())
bottom = top + int(self.editor.blockBoundingRect(block).height())
indentation = TextBlockHelper.get_fold_lvl(block)
for i in range(1, indentation):
if (line_number > last_line and
TextBlockHelper.get_fold_lvl(end_of_sub_fold) <= i):
continue
else:
x = self.editor.fontMetrics().width(i * self.i_width *
'9') + offset
painter.drawLine(x, top, x, bottom) | [
"def",
"paintEvent",
"(",
"self",
",",
"event",
")",
":",
"painter",
"=",
"QPainter",
"(",
"self",
")",
"color",
"=",
"QColor",
"(",
"self",
".",
"color",
")",
"color",
".",
"setAlphaF",
"(",
".5",
")",
"painter",
".",
"setPen",
"(",
"color",
")",
"offset",
"=",
"self",
".",
"editor",
".",
"document",
"(",
")",
".",
"documentMargin",
"(",
")",
"+",
"self",
".",
"editor",
".",
"contentOffset",
"(",
")",
".",
"x",
"(",
")",
"for",
"_",
",",
"line_number",
",",
"block",
"in",
"self",
".",
"editor",
".",
"visible_blocks",
":",
"indentation",
"=",
"TextBlockHelper",
".",
"get_fold_lvl",
"(",
"block",
")",
"ref_lvl",
"=",
"indentation",
"block",
"=",
"block",
".",
"next",
"(",
")",
"last_line",
"=",
"block",
".",
"blockNumber",
"(",
")",
"lvl",
"=",
"TextBlockHelper",
".",
"get_fold_lvl",
"(",
"block",
")",
"if",
"ref_lvl",
"==",
"lvl",
":",
"# for zone set programmatically such as imports",
"# in pyqode.python",
"ref_lvl",
"-=",
"1",
"while",
"(",
"block",
".",
"isValid",
"(",
")",
"and",
"TextBlockHelper",
".",
"get_fold_lvl",
"(",
"block",
")",
">",
"ref_lvl",
")",
":",
"last_line",
"=",
"block",
".",
"blockNumber",
"(",
")",
"block",
"=",
"block",
".",
"next",
"(",
")",
"end_of_sub_fold",
"=",
"block",
"if",
"last_line",
":",
"block",
"=",
"block",
".",
"document",
"(",
")",
".",
"findBlockByNumber",
"(",
"last_line",
")",
"while",
"(",
"(",
"block",
".",
"blockNumber",
"(",
")",
")",
"and",
"(",
"block",
".",
"text",
"(",
")",
".",
"strip",
"(",
")",
"==",
"''",
"or",
"block",
".",
"text",
"(",
")",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'#'",
")",
")",
")",
":",
"block",
"=",
"block",
".",
"previous",
"(",
")",
"last_line",
"=",
"block",
".",
"blockNumber",
"(",
")",
"block",
"=",
"self",
".",
"editor",
".",
"document",
"(",
")",
".",
"findBlockByNumber",
"(",
"line_number",
")",
"top",
"=",
"int",
"(",
"self",
".",
"editor",
".",
"blockBoundingGeometry",
"(",
"block",
")",
".",
"translated",
"(",
"self",
".",
"editor",
".",
"contentOffset",
"(",
")",
")",
".",
"top",
"(",
")",
")",
"bottom",
"=",
"top",
"+",
"int",
"(",
"self",
".",
"editor",
".",
"blockBoundingRect",
"(",
"block",
")",
".",
"height",
"(",
")",
")",
"indentation",
"=",
"TextBlockHelper",
".",
"get_fold_lvl",
"(",
"block",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"indentation",
")",
":",
"if",
"(",
"line_number",
">",
"last_line",
"and",
"TextBlockHelper",
".",
"get_fold_lvl",
"(",
"end_of_sub_fold",
")",
"<=",
"i",
")",
":",
"continue",
"else",
":",
"x",
"=",
"self",
".",
"editor",
".",
"fontMetrics",
"(",
")",
".",
"width",
"(",
"i",
"*",
"self",
".",
"i_width",
"*",
"'9'",
")",
"+",
"offset",
"painter",
".",
"drawLine",
"(",
"x",
",",
"top",
",",
"x",
",",
"bottom",
")"
] | Override Qt method. | [
"Override",
"Qt",
"method",
"."
] | python | train |
radjkarl/fancyWidgets | DUMP/pyqtgraphBased/parametertree/parameterTypes.py | https://github.com/radjkarl/fancyWidgets/blob/ffe0d5747c5296c78575f0e0909af915a4a5698f/DUMP/pyqtgraphBased/parametertree/parameterTypes.py#L287-L303 | def optsChanged(self, param, opts):
"""Called when any options are changed that are not
name, value, default, or limits"""
# print "opts changed:", opts
ParameterItem.optsChanged(self, param, opts)
w = self.widget
if 'readonly' in opts:
self.updateDefaultBtn()
if isinstance(w, (QtWidgets.QCheckBox, ColorButton)):
w.setEnabled(not opts['readonly'])
# If widget is a SpinBox, pass options straight through
if isinstance(self.widget, SpinBox):
if 'units' in opts and 'suffix' not in opts:
opts['suffix'] = opts['units']
w.setOpts(**opts)
self.updateDisplayLabel() | [
"def",
"optsChanged",
"(",
"self",
",",
"param",
",",
"opts",
")",
":",
"# print \"opts changed:\", opts",
"ParameterItem",
".",
"optsChanged",
"(",
"self",
",",
"param",
",",
"opts",
")",
"w",
"=",
"self",
".",
"widget",
"if",
"'readonly'",
"in",
"opts",
":",
"self",
".",
"updateDefaultBtn",
"(",
")",
"if",
"isinstance",
"(",
"w",
",",
"(",
"QtWidgets",
".",
"QCheckBox",
",",
"ColorButton",
")",
")",
":",
"w",
".",
"setEnabled",
"(",
"not",
"opts",
"[",
"'readonly'",
"]",
")",
"# If widget is a SpinBox, pass options straight through",
"if",
"isinstance",
"(",
"self",
".",
"widget",
",",
"SpinBox",
")",
":",
"if",
"'units'",
"in",
"opts",
"and",
"'suffix'",
"not",
"in",
"opts",
":",
"opts",
"[",
"'suffix'",
"]",
"=",
"opts",
"[",
"'units'",
"]",
"w",
".",
"setOpts",
"(",
"*",
"*",
"opts",
")",
"self",
".",
"updateDisplayLabel",
"(",
")"
] | Called when any options are changed that are not
name, value, default, or limits | [
"Called",
"when",
"any",
"options",
"are",
"changed",
"that",
"are",
"not",
"name",
"value",
"default",
"or",
"limits"
] | python | train |
libyal/dtfabric | dtfabric/runtime/data_maps.py | https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L1986-L2000 | def GetName(self, number):
"""Retrieves the name of an enumeration value by number.
Args:
number (int): number.
Returns:
str: name of the enumeration value or None if no corresponding
enumeration value was found.
"""
value = self._data_type_definition.values_per_number.get(number, None)
if not value:
return None
return value.name | [
"def",
"GetName",
"(",
"self",
",",
"number",
")",
":",
"value",
"=",
"self",
".",
"_data_type_definition",
".",
"values_per_number",
".",
"get",
"(",
"number",
",",
"None",
")",
"if",
"not",
"value",
":",
"return",
"None",
"return",
"value",
".",
"name"
] | Retrieves the name of an enumeration value by number.
Args:
number (int): number.
Returns:
str: name of the enumeration value or None if no corresponding
enumeration value was found. | [
"Retrieves",
"the",
"name",
"of",
"an",
"enumeration",
"value",
"by",
"number",
"."
] | python | train |
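The GetName method above is a guarded dictionary lookup; a standalone sketch of the same pattern, where EnumValue and the values_per_number mapping are hypothetical stand-ins for dtfabric's data-type definition objects:

class EnumValue(object):
    def __init__(self, name, number):
        self.name = name       # human-readable enumeration name
        self.number = number   # numeric enumeration value

# Hypothetical mapping as a data-type definition would hold it.
values_per_number = {0: EnumValue('NONE', 0), 1: EnumValue('READ', 1)}

def get_name(number):
    value = values_per_number.get(number, None)
    if not value:
        return None
    return value.name

print(get_name(1))   # READ
print(get_name(99))  # None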
quantmind/pulsar | docs/_ext/sphinxtogithub.py | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/docs/_ext/sphinxtogithub.py#L288-L292 | def setup(app):
"Setup function for Sphinx Extension"
app.add_config_value("sphinx_to_github", True, '')
app.add_config_value("sphinx_to_github_verbose", True, '')
app.connect("build-finished", sphinx_extension) | [
"def",
"setup",
"(",
"app",
")",
":",
"app",
".",
"add_config_value",
"(",
"\"sphinx_to_github\"",
",",
"True",
",",
"''",
")",
"app",
".",
"add_config_value",
"(",
"\"sphinx_to_github_verbose\"",
",",
"True",
",",
"''",
")",
"app",
".",
"connect",
"(",
"\"build-finished\"",
",",
"sphinx_extension",
")"
] | Setup function for Sphinx Extension | [
"Setup",
"function",
"for",
"Sphinx",
"Extension"
] | python | train |
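A minimal conf.py sketch showing how a Sphinx project would enable the extension whose setup() hook is defined above; the two config values are the ones this module registers:

# conf.py -- illustrative Sphinx project configuration
extensions = ['sphinxtogithub']

# Read back by the extension through app.config when the build finishes.
sphinx_to_github = True
sphinx_to_github_verbose = True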
hvac/hvac | hvac/v1/__init__.py | https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/v1/__init__.py#L647-L673 | def auth_ec2(self, pkcs7, nonce=None, role=None, use_token=True, mount_point='aws-ec2'):
"""POST /auth/<mount point>/login
:param pkcs7: PKCS#7 version of an AWS Instance Identity Document from the EC2 Metadata Service.
:type pkcs7: str.
:param nonce: Optional nonce returned as part of the original authentication request. Not required if the backend
has "allow_instance_migration" or "disallow_reauthentication" options turned on.
:type nonce: str.
:param role: Identifier for the AWS auth backend role being requested.
:type role: str.
:param use_token: If True, uses the token in the response received from the auth request to set the "token"
attribute on the current Client class instance.
:type use_token: bool.
:param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2"
        is the default argument for backwards compatibility within this module.
:type mount_point: str.
:return: parsed JSON response from the auth POST request
:rtype: dict.
"""
params = {'pkcs7': pkcs7}
if nonce:
params['nonce'] = nonce
if role:
params['role'] = role
return self.login('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token) | [
"def",
"auth_ec2",
"(",
"self",
",",
"pkcs7",
",",
"nonce",
"=",
"None",
",",
"role",
"=",
"None",
",",
"use_token",
"=",
"True",
",",
"mount_point",
"=",
"'aws-ec2'",
")",
":",
"params",
"=",
"{",
"'pkcs7'",
":",
"pkcs7",
"}",
"if",
"nonce",
":",
"params",
"[",
"'nonce'",
"]",
"=",
"nonce",
"if",
"role",
":",
"params",
"[",
"'role'",
"]",
"=",
"role",
"return",
"self",
".",
"login",
"(",
"'/v1/auth/{0}/login'",
".",
"format",
"(",
"mount_point",
")",
",",
"json",
"=",
"params",
",",
"use_token",
"=",
"use_token",
")"
] | POST /auth/<mount point>/login
:param pkcs7: PKCS#7 version of an AWS Instance Identity Document from the EC2 Metadata Service.
:type pkcs7: str.
:param nonce: Optional nonce returned as part of the original authentication request. Not required if the backend
has "allow_instance_migration" or "disallow_reauthentication" options turned on.
:type nonce: str.
:param role: Identifier for the AWS auth backend role being requested.
:type role: str.
:param use_token: If True, uses the token in the response received from the auth request to set the "token"
attribute on the current Client class instance.
:type use_token: bool.
:param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2"
        is the default argument for backwards compatibility within this module.
:type mount_point: str.
:return: parsed JSON response from the auth POST request
:rtype: dict. | [
"POST",
"/",
"auth",
"/",
"<mount",
"point",
">",
"/",
"login"
] | python | train |
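A minimal usage sketch for auth_ec2 above, assuming a reachable Vault server; the Vault address and role name are illustrative, and the PKCS#7 document is fetched from the standard EC2 metadata endpoint:

import requests
import hvac

# PKCS#7 instance identity document from the EC2 metadata service.
pkcs7 = requests.get(
    'http://169.254.169.254/latest/dynamic/instance-identity/pkcs7'
).text.replace('\n', '')

client = hvac.Client(url='https://vault.example.com:8200')  # hypothetical address
auth = client.auth_ec2(pkcs7, role='web-role', mount_point='aws-ec2')
print(client.is_authenticated())
print(auth['auth']['client_token'])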
balloob/pychromecast | pychromecast/controllers/multizone.py | https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/controllers/multizone.py#L68-L76 | def multizone_member_added(self, member_uuid):
"""Handle added audio group member."""
casts = self._casts
if member_uuid not in casts:
casts[member_uuid] = {'listeners': [],
'groups': set()}
casts[member_uuid]['groups'].add(self._group_uuid)
for listener in list(casts[member_uuid]['listeners']):
listener.added_to_multizone(self._group_uuid) | [
"def",
"multizone_member_added",
"(",
"self",
",",
"member_uuid",
")",
":",
"casts",
"=",
"self",
".",
"_casts",
"if",
"member_uuid",
"not",
"in",
"casts",
":",
"casts",
"[",
"member_uuid",
"]",
"=",
"{",
"'listeners'",
":",
"[",
"]",
",",
"'groups'",
":",
"set",
"(",
")",
"}",
"casts",
"[",
"member_uuid",
"]",
"[",
"'groups'",
"]",
".",
"add",
"(",
"self",
".",
"_group_uuid",
")",
"for",
"listener",
"in",
"list",
"(",
"casts",
"[",
"member_uuid",
"]",
"[",
"'listeners'",
"]",
")",
":",
"listener",
".",
"added_to_multizone",
"(",
"self",
".",
"_group_uuid",
")"
] | Handle added audio group member. | [
"Handle",
"added",
"audio",
"group",
"member",
"."
] | python | train |
PyCQA/astroid | astroid/as_string.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/as_string.py#L615-L623 | def _import_string(names):
"""return a list of (name, asname) formatted as a string"""
_names = []
for name, asname in names:
if asname is not None:
_names.append("%s as %s" % (name, asname))
else:
_names.append(name)
return ", ".join(_names) | [
"def",
"_import_string",
"(",
"names",
")",
":",
"_names",
"=",
"[",
"]",
"for",
"name",
",",
"asname",
"in",
"names",
":",
"if",
"asname",
"is",
"not",
"None",
":",
"_names",
".",
"append",
"(",
"\"%s as %s\"",
"%",
"(",
"name",
",",
"asname",
")",
")",
"else",
":",
"_names",
".",
"append",
"(",
"name",
")",
"return",
"\", \"",
".",
"join",
"(",
"_names",
")"
] | return a list of (name, asname) formatted as a string | [
"return",
"a",
"list",
"of",
"(",
"name",
"asname",
")",
"formatted",
"as",
"a",
"string"
] | python | train |
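A short illustration of the helper above, called directly with the kind of (name, asname) pairs an Import node carries:

names = [('os', None), ('numpy', 'np'), ('collections.abc', 'abc')]
print(_import_string(names))
# -> os, numpy as np, collections.abc as abc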
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L203-L215 | def get_input_var_names(self):
"""Return a list of variables names that can be set as input.
Returns
-------
var_names : list[str]
A list of variable names that can be set from the outside
"""
in_vars = copy.copy(self.input_vars)
for idx, var in enumerate(in_vars):
if self._map_in_out(var) is not None:
in_vars[idx] = self._map_in_out(var)
return in_vars | [
"def",
"get_input_var_names",
"(",
"self",
")",
":",
"in_vars",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"input_vars",
")",
"for",
"idx",
",",
"var",
"in",
"enumerate",
"(",
"in_vars",
")",
":",
"if",
"self",
".",
"_map_in_out",
"(",
"var",
")",
"is",
"not",
"None",
":",
"in_vars",
"[",
"idx",
"]",
"=",
"self",
".",
"_map_in_out",
"(",
"var",
")",
"return",
"in_vars"
] | Return a list of variables names that can be set as input.
] | Return a list of variable names that can be set as input.
-------
var_names : list[str]
A list of variable names that can be set from the outside | [
"Return",
"a",
"list",
"of",
"variables",
"names",
"that",
"can",
"be",
"set",
"as",
"input",
"."
] | python | train |
Komnomnomnom/swigibpy | swigibpy.py | https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L1557-L1559 | def exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override):
"""exerciseOptions(EClientSocketBase self, TickerId tickerId, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
return _swigibpy.EClientSocketBase_exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override) | [
"def",
"exerciseOptions",
"(",
"self",
",",
"tickerId",
",",
"contract",
",",
"exerciseAction",
",",
"exerciseQuantity",
",",
"account",
",",
"override",
")",
":",
"return",
"_swigibpy",
".",
"EClientSocketBase_exerciseOptions",
"(",
"self",
",",
"tickerId",
",",
"contract",
",",
"exerciseAction",
",",
"exerciseQuantity",
",",
"account",
",",
"override",
")"
] | exerciseOptions(EClientSocketBase self, TickerId tickerId, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override) | [
"exerciseOptions",
"(",
"EClientSocketBase",
"self",
"TickerId",
"tickerId",
"Contract",
"contract",
"int",
"exerciseAction",
"int",
"exerciseQuantity",
"IBString",
"const",
"&",
"account",
"int",
"override",
")"
] | python | train |
priestc/giotto | giotto/views/__init__.py | https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/views/__init__.py#L219-L228 | def jinja_template(template_name, name='data', mimetype="text/html"):
"""
Meta-renderer for rendering jinja templates
"""
def jinja_renderer(result, errors):
template = get_jinja_template(template_name)
context = {name: result or Mock(), 'errors': errors, 'enumerate': enumerate}
rendered = template.render(**context)
return {'body': rendered, 'mimetype': mimetype}
return jinja_renderer | [
"def",
"jinja_template",
"(",
"template_name",
",",
"name",
"=",
"'data'",
",",
"mimetype",
"=",
"\"text/html\"",
")",
":",
"def",
"jinja_renderer",
"(",
"result",
",",
"errors",
")",
":",
"template",
"=",
"get_jinja_template",
"(",
"template_name",
")",
"context",
"=",
"{",
"name",
":",
"result",
"or",
"Mock",
"(",
")",
",",
"'errors'",
":",
"errors",
",",
"'enumerate'",
":",
"enumerate",
"}",
"rendered",
"=",
"template",
".",
"render",
"(",
"*",
"*",
"context",
")",
"return",
"{",
"'body'",
":",
"rendered",
",",
"'mimetype'",
":",
"mimetype",
"}",
"return",
"jinja_renderer"
] | Meta-renderer for rendering jinja templates | [
"Meta",
"-",
"renderer",
"for",
"rendering",
"jinja",
"templates"
] | python | train |
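A hypothetical call of the meta-renderer above; 'profile.html' and the result dict are made up, and a real call needs the template to be resolvable by get_jinja_template:

render = jinja_template('profile.html', name='user', mimetype='text/html')

# The returned closure takes (result, errors) and yields a response dict.
response = render({'username': 'alice'}, errors=None)
print(response['mimetype'])   # text/html
print(response['body'][:80])  # first chunk of the rendered HTML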
warner/magic-wormhole | src/wormhole/xfer_util.py | https://github.com/warner/magic-wormhole/blob/995d3f546a33eec4f64df929848d86937d2003a7/src/wormhole/xfer_util.py#L10-L75 | def receive(reactor,
appid,
relay_url,
code,
use_tor=False,
launch_tor=False,
tor_control_port=None,
on_code=None):
"""
This is a convenience API which returns a Deferred that callbacks
with a single chunk of data from another wormhole (and then closes
the wormhole). Under the hood, it's just using an instance
returned from :func:`wormhole.wormhole`. This is similar to the
`wormhole receive` command.
:param unicode appid: our application ID
:param unicode relay_url: the relay URL to use
:param unicode code: a pre-existing code to use, or None
:param bool use_tor: True if we should use Tor, False to not use it (None
for default)
:param on_code: if not None, this is called when we have a code (even if
you passed in one explicitly)
:type on_code: single-argument callable
"""
tor = None
if use_tor:
tor = yield get_tor(reactor, launch_tor, tor_control_port)
# For now, block everything until Tor has started. Soon: launch
# tor in parallel with everything else, make sure the Tor object
# can lazy-provide an endpoint, and overlap the startup process
# with the user handing off the wormhole code
wh = wormhole.create(appid, relay_url, reactor, tor=tor)
if code is None:
wh.allocate_code()
code = yield wh.get_code()
else:
wh.set_code(code)
# we'll call this no matter what, even if you passed in a code --
# maybe it should be only in the 'if' block above?
if on_code:
on_code(code)
data = yield wh.get_message()
data = json.loads(data.decode("utf-8"))
offer = data.get('offer', None)
if not offer:
raise Exception("Do not understand response: {}".format(data))
msg = None
if 'message' in offer:
msg = offer['message']
wh.send_message(
json.dumps({
"answer": {
"message_ack": "ok"
}
}).encode("utf-8"))
else:
raise Exception("Unknown offer type: {}".format(offer.keys()))
yield wh.close()
returnValue(msg) | [
"def",
"receive",
"(",
"reactor",
",",
"appid",
",",
"relay_url",
",",
"code",
",",
"use_tor",
"=",
"False",
",",
"launch_tor",
"=",
"False",
",",
"tor_control_port",
"=",
"None",
",",
"on_code",
"=",
"None",
")",
":",
"tor",
"=",
"None",
"if",
"use_tor",
":",
"tor",
"=",
"yield",
"get_tor",
"(",
"reactor",
",",
"launch_tor",
",",
"tor_control_port",
")",
"# For now, block everything until Tor has started. Soon: launch",
"# tor in parallel with everything else, make sure the Tor object",
"# can lazy-provide an endpoint, and overlap the startup process",
"# with the user handing off the wormhole code",
"wh",
"=",
"wormhole",
".",
"create",
"(",
"appid",
",",
"relay_url",
",",
"reactor",
",",
"tor",
"=",
"tor",
")",
"if",
"code",
"is",
"None",
":",
"wh",
".",
"allocate_code",
"(",
")",
"code",
"=",
"yield",
"wh",
".",
"get_code",
"(",
")",
"else",
":",
"wh",
".",
"set_code",
"(",
"code",
")",
"# we'll call this no matter what, even if you passed in a code --",
"# maybe it should be only in the 'if' block above?",
"if",
"on_code",
":",
"on_code",
"(",
"code",
")",
"data",
"=",
"yield",
"wh",
".",
"get_message",
"(",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"offer",
"=",
"data",
".",
"get",
"(",
"'offer'",
",",
"None",
")",
"if",
"not",
"offer",
":",
"raise",
"Exception",
"(",
"\"Do not understand response: {}\"",
".",
"format",
"(",
"data",
")",
")",
"msg",
"=",
"None",
"if",
"'message'",
"in",
"offer",
":",
"msg",
"=",
"offer",
"[",
"'message'",
"]",
"wh",
".",
"send_message",
"(",
"json",
".",
"dumps",
"(",
"{",
"\"answer\"",
":",
"{",
"\"message_ack\"",
":",
"\"ok\"",
"}",
"}",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknown offer type: {}\"",
".",
"format",
"(",
"offer",
".",
"keys",
"(",
")",
")",
")",
"yield",
"wh",
".",
"close",
"(",
")",
"returnValue",
"(",
"msg",
")"
] | This is a convenience API which returns a Deferred that callbacks
with a single chunk of data from another wormhole (and then closes
the wormhole). Under the hood, it's just using an instance
returned from :func:`wormhole.wormhole`. This is similar to the
`wormhole receive` command.
:param unicode appid: our application ID
:param unicode relay_url: the relay URL to use
:param unicode code: a pre-existing code to use, or None
:param bool use_tor: True if we should use Tor, False to not use it (None
for default)
:param on_code: if not None, this is called when we have a code (even if
you passed in one explicitly)
:type on_code: single-argument callable | [
"This",
"is",
"a",
"convenience",
"API",
"which",
"returns",
"a",
"Deferred",
"that",
"callbacks",
"with",
"a",
"single",
"chunk",
"of",
"data",
"from",
"another",
"wormhole",
"(",
"and",
"then",
"closes",
"the",
"wormhole",
")",
".",
"Under",
"the",
"hood",
"it",
"s",
"just",
"using",
"an",
"instance",
"returned",
"from",
":",
"func",
":",
"wormhole",
".",
"wormhole",
".",
"This",
"is",
"similar",
"to",
"the",
"wormhole",
"receive",
"command",
"."
] | python | train |
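A driver sketch for receive above, assuming the function runs under Twisted's inlineCallbacks (its body uses yield and returnValue, so the decorator is applied where it is defined); the app id and wormhole code are placeholders and the relay URL is the project's public default:

from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def main():
    msg = yield receive(
        reactor,
        appid=u'example.com/demo',                        # placeholder app id
        relay_url=u'ws://relay.magic-wormhole.io:4000/v1',
        code=u'4-purple-sausages',                        # code from the sender
    )
    print(msg)
    reactor.stop()

main()
reactor.run()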
klavinslab/coral | coral/database/_yeast.py | https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/database/_yeast.py#L6-L83 | def fetch_yeast_locus_sequence(locus_name, flanking_size=0):
'''Acquire a sequence from SGD http://www.yeastgenome.org.
:param locus_name: Common name or systematic name for the locus (e.g. ACT1
or YFL039C).
:type locus_name: str
:param flanking_size: The length of flanking DNA (on each side) to return
:type flanking_size: int
'''
from intermine.webservice import Service
service = Service('http://yeastmine.yeastgenome.org/yeastmine/service')
# Get a new query on the class (table) you will be querying:
query = service.new_query('Gene')
if flanking_size > 0:
# The view specifies the output columns
# secondaryIdentifier: the systematic name (e.g. YFL039C)
# symbol: short name (e.g. ACT1)
# length: sequence length
# flankingRegions.direction: Upstream or downstream (or both) of locus
# flankingRegions.sequence.length: length of the flanking regions
# flankingRegions.sequence.residues: sequence of the flanking regions
query.add_view('secondaryIdentifier', 'symbol', 'length',
'flankingRegions.direction',
'flankingRegions.sequence.length',
'flankingRegions.sequence.residues')
# You can edit the constraint values below
query.add_constraint('flankingRegions.direction', '=', 'both',
code='A')
query.add_constraint('Gene', 'LOOKUP', locus_name, 'S. cerevisiae',
code='B')
query.add_constraint('flankingRegions.distance', '=',
'{:.1f}kb'.format(flanking_size / 1000.),
code='C')
# Uncomment and edit the code below to specify your own custom logic:
query.set_logic('A and B and C')
# TODO: What to do when there's more than one result?
first_result = query.rows().next()
# FIXME: Use logger module instead
# print first_result['secondaryIdentifier']
# print first_result['symbol'], row['length']
# print first_result['flankingRegions.direction']
# print first_result['flankingRegions.sequence.length']
# print first_result['flankingRegions.sequence.residues']
seq = coral.DNA(first_result['flankingRegions.sequence.residues'])
# TODO: add more metadata
elif flanking_size == 0:
# The view specifies the output columns
query.add_view('primaryIdentifier', 'secondaryIdentifier', 'symbol',
'name', 'sgdAlias', 'organism.shortName',
'sequence.length', 'sequence.residues', 'description',
'qualifier')
query.add_constraint('status', 'IS NULL', code='D')
query.add_constraint('status', '=', 'Active', code='C')
query.add_constraint('qualifier', 'IS NULL', code='B')
query.add_constraint('qualifier', '!=', 'Dubious', code='A')
query.add_constraint('Gene', 'LOOKUP', locus_name, 'S. cerevisiae',
code='E')
# Your custom constraint logic is specified with the code below:
query.set_logic('(A or B) and (C or D) and E')
first_result = query.rows().next()
seq = coral.DNA(first_result['sequence.residues'])
else:
        print('Problem with the flanking region size....')
seq = coral.DNA('')
return seq | [
"def",
"fetch_yeast_locus_sequence",
"(",
"locus_name",
",",
"flanking_size",
"=",
"0",
")",
":",
"from",
"intermine",
".",
"webservice",
"import",
"Service",
"service",
"=",
"Service",
"(",
"'http://yeastmine.yeastgenome.org/yeastmine/service'",
")",
"# Get a new query on the class (table) you will be querying:",
"query",
"=",
"service",
".",
"new_query",
"(",
"'Gene'",
")",
"if",
"flanking_size",
">",
"0",
":",
"# The view specifies the output columns",
"# secondaryIdentifier: the systematic name (e.g. YFL039C)",
"# symbol: short name (e.g. ACT1)",
"# length: sequence length",
"# flankingRegions.direction: Upstream or downstream (or both) of locus",
"# flankingRegions.sequence.length: length of the flanking regions",
"# flankingRegions.sequence.residues: sequence of the flanking regions",
"query",
".",
"add_view",
"(",
"'secondaryIdentifier'",
",",
"'symbol'",
",",
"'length'",
",",
"'flankingRegions.direction'",
",",
"'flankingRegions.sequence.length'",
",",
"'flankingRegions.sequence.residues'",
")",
"# You can edit the constraint values below",
"query",
".",
"add_constraint",
"(",
"'flankingRegions.direction'",
",",
"'='",
",",
"'both'",
",",
"code",
"=",
"'A'",
")",
"query",
".",
"add_constraint",
"(",
"'Gene'",
",",
"'LOOKUP'",
",",
"locus_name",
",",
"'S. cerevisiae'",
",",
"code",
"=",
"'B'",
")",
"query",
".",
"add_constraint",
"(",
"'flankingRegions.distance'",
",",
"'='",
",",
"'{:.1f}kb'",
".",
"format",
"(",
"flanking_size",
"/",
"1000.",
")",
",",
"code",
"=",
"'C'",
")",
"# Uncomment and edit the code below to specify your own custom logic:",
"query",
".",
"set_logic",
"(",
"'A and B and C'",
")",
"# TODO: What to do when there's more than one result?",
"first_result",
"=",
"query",
".",
"rows",
"(",
")",
".",
"next",
"(",
")",
"# FIXME: Use logger module instead",
"# print first_result['secondaryIdentifier']",
"# print first_result['symbol'], row['length']",
"# print first_result['flankingRegions.direction']",
"# print first_result['flankingRegions.sequence.length']",
"# print first_result['flankingRegions.sequence.residues']",
"seq",
"=",
"coral",
".",
"DNA",
"(",
"first_result",
"[",
"'flankingRegions.sequence.residues'",
"]",
")",
"# TODO: add more metadata",
"elif",
"flanking_size",
"==",
"0",
":",
"# The view specifies the output columns",
"query",
".",
"add_view",
"(",
"'primaryIdentifier'",
",",
"'secondaryIdentifier'",
",",
"'symbol'",
",",
"'name'",
",",
"'sgdAlias'",
",",
"'organism.shortName'",
",",
"'sequence.length'",
",",
"'sequence.residues'",
",",
"'description'",
",",
"'qualifier'",
")",
"query",
".",
"add_constraint",
"(",
"'status'",
",",
"'IS NULL'",
",",
"code",
"=",
"'D'",
")",
"query",
".",
"add_constraint",
"(",
"'status'",
",",
"'='",
",",
"'Active'",
",",
"code",
"=",
"'C'",
")",
"query",
".",
"add_constraint",
"(",
"'qualifier'",
",",
"'IS NULL'",
",",
"code",
"=",
"'B'",
")",
"query",
".",
"add_constraint",
"(",
"'qualifier'",
",",
"'!='",
",",
"'Dubious'",
",",
"code",
"=",
"'A'",
")",
"query",
".",
"add_constraint",
"(",
"'Gene'",
",",
"'LOOKUP'",
",",
"locus_name",
",",
"'S. cerevisiae'",
",",
"code",
"=",
"'E'",
")",
"# Your custom constraint logic is specified with the code below:",
"query",
".",
"set_logic",
"(",
"'(A or B) and (C or D) and E'",
")",
"first_result",
"=",
"query",
".",
"rows",
"(",
")",
".",
"next",
"(",
")",
"seq",
"=",
"coral",
".",
"DNA",
"(",
"first_result",
"[",
"'sequence.residues'",
"]",
")",
"else",
":",
"print",
"'Problem with the flanking region size....'",
"seq",
"=",
"coral",
".",
"DNA",
"(",
"''",
")",
"return",
"seq"
] | Acquire a sequence from SGD http://www.yeastgenome.org.
:param locus_name: Common name or systematic name for the locus (e.g. ACT1
or YFL039C).
:type locus_name: str
:param flanking_size: The length of flanking DNA (on each side) to return
:type flanking_size: int | [
"Acquire",
"a",
"sequence",
"from",
"SGD",
"http",
":",
"//",
"www",
".",
"yeastgenome",
".",
"org",
"."
] | python | train |
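Illustrative calls of the function above; both need the intermine client installed and network access to YeastMine, and the locus name is just an example:

# Coding sequence only.
act1 = fetch_yeast_locus_sequence('ACT1')

# Same locus plus 1 kb of flanking DNA on each side.
act1_flanked = fetch_yeast_locus_sequence('ACT1', flanking_size=1000)

print(len(act1), len(act1_flanked))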
fopina/tgbotplug | tgbot/botapi.py | https://github.com/fopina/tgbotplug/blob/c115733b03f2e23ddcdecfce588d1a6a1e5bde91/tgbot/botapi.py#L1537-L1588 | def send_video(chat_id, video,
duration=None, caption=None, reply_to_message_id=None, reply_markup=None,
**kwargs):
"""
    Use this method to send video files; Telegram clients support mp4 videos (other formats may be sent as Document).
:param chat_id: Unique identifier for the message recipient — User or GroupChat id
:param video: Video to send. You can either pass a file_id as String to resend a
video that is already on the Telegram servers, or upload a new video
using multipart/form-data.
:param duration: Duration of sent video in seconds
:param caption: Video caption (may also be used when resending videos by file_id)
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param \*\*kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int
:type video: InputFile or str
:type duration: int
:type caption: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest
"""
files = None
if isinstance(video, InputFile):
files = [video]
video = None
elif not isinstance(video, str):
raise Exception('video must be instance of InputFile or str')
# required args
params = dict(
chat_id=chat_id,
video=video
)
# optional args
params.update(
_clean_params(
duration=duration,
caption=caption,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup
)
)
return TelegramBotRPCRequest('sendVideo', params=params, files=files, on_result=Message.from_result, **kwargs) | [
"def",
"send_video",
"(",
"chat_id",
",",
"video",
",",
"duration",
"=",
"None",
",",
"caption",
"=",
"None",
",",
"reply_to_message_id",
"=",
"None",
",",
"reply_markup",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"files",
"=",
"None",
"if",
"isinstance",
"(",
"video",
",",
"InputFile",
")",
":",
"files",
"=",
"[",
"video",
"]",
"video",
"=",
"None",
"elif",
"not",
"isinstance",
"(",
"video",
",",
"str",
")",
":",
"raise",
"Exception",
"(",
"'video must be instance of InputFile or str'",
")",
"# required args",
"params",
"=",
"dict",
"(",
"chat_id",
"=",
"chat_id",
",",
"video",
"=",
"video",
")",
"# optional args",
"params",
".",
"update",
"(",
"_clean_params",
"(",
"duration",
"=",
"duration",
",",
"caption",
"=",
"caption",
",",
"reply_to_message_id",
"=",
"reply_to_message_id",
",",
"reply_markup",
"=",
"reply_markup",
")",
")",
"return",
"TelegramBotRPCRequest",
"(",
"'sendVideo'",
",",
"params",
"=",
"params",
",",
"files",
"=",
"files",
",",
"on_result",
"=",
"Message",
".",
"from_result",
",",
"*",
"*",
"kwargs",
")"
] | Use this method to send video files; Telegram clients support mp4 videos (other formats may be sent as Document).
:param chat_id: Unique identifier for the message recipient — User or GroupChat id
:param video: Video to send. You can either pass a file_id as String to resend a
video that is already on the Telegram servers, or upload a new video
using multipart/form-data.
:param duration: Duration of sent video in seconds
:param caption: Video caption (may also be used when resending videos by file_id)
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param \*\*kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int
:type video: InputFile or str
:type duration: int
:type caption: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest | [
"Use",
"this",
"method",
"to",
"send",
"video",
"files",
"Telegram",
"clients",
"support",
"mp4",
"videos",
"(",
"other",
"formats",
"may",
"be",
"sent",
"as",
"Document",
")",
"."
] | python | train |
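An illustrative resend-by-file_id call of send_video above (the docstring allows a str file_id in place of an upload); the chat id and file_id are made up, and in practice the bot credentials travel through the **kwargs forwarded to TelegramBotRPCRequest:

request = send_video(
    chat_id=123456789,                 # hypothetical recipient
    video='BAACAgQAAxkBAAIB_example',  # file_id of a video already on Telegram
    caption='demo clip',
)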
flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L1598-L1612 | def _inverse_i(self, y, i):
"""return inverse of y in component i"""
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
if 1 < 3:
if not lb <= y <= ub:
raise ValueError('argument of inverse must be within the given bounds')
if y < lb + al:
return (lb - al) + 2 * (al * (y - lb))**0.5
elif y < ub - au:
return y
else:
return (ub + au) - 2 * (au * (ub - y))**0.5 | [
"def",
"_inverse_i",
"(",
"self",
",",
"y",
",",
"i",
")",
":",
"lb",
"=",
"self",
".",
"_lb",
"[",
"self",
".",
"_index",
"(",
"i",
")",
"]",
"ub",
"=",
"self",
".",
"_ub",
"[",
"self",
".",
"_index",
"(",
"i",
")",
"]",
"al",
"=",
"self",
".",
"_al",
"[",
"self",
".",
"_index",
"(",
"i",
")",
"]",
"au",
"=",
"self",
".",
"_au",
"[",
"self",
".",
"_index",
"(",
"i",
")",
"]",
"if",
"1",
"<",
"3",
":",
"if",
"not",
"lb",
"<=",
"y",
"<=",
"ub",
":",
"raise",
"ValueError",
"(",
"'argument of inverse must be within the given bounds'",
")",
"if",
"y",
"<",
"lb",
"+",
"al",
":",
"return",
"(",
"lb",
"-",
"al",
")",
"+",
"2",
"*",
"(",
"al",
"*",
"(",
"y",
"-",
"lb",
")",
")",
"**",
"0.5",
"elif",
"y",
"<",
"ub",
"-",
"au",
":",
"return",
"y",
"else",
":",
"return",
"(",
"ub",
"+",
"au",
")",
"-",
"2",
"*",
"(",
"au",
"*",
"(",
"ub",
"-",
"y",
")",
")",
"**",
"0.5"
] | return inverse of y in component i | [
"return",
"inverse",
"of",
"y",
"in",
"component",
"i"
] | python | train |
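The branches above invert cma's smooth box-boundary transform; a standalone round-trip check of all three zones, with the forward map reconstructed from the inverse (the bound parameters lb, ub, al, au are illustrative):

import math

lb, ub, al, au = 0.0, 10.0, 1.0, 1.0  # illustrative bound parameters

def forward(x):
    # Forward map whose lower branch, y = lb + (x - (lb - al))**2 / (4*al),
    # is exactly what the quadratic inverse branch above undoes.
    if x < lb + al:
        return lb + (x - (lb - al)) ** 2 / (4 * al)
    elif x < ub - au:
        return x
    return ub - (x - (ub + au)) ** 2 / (4 * au)

def inverse(y):
    if y < lb + al:
        return (lb - al) + 2 * math.sqrt(al * (y - lb))
    elif y < ub - au:
        return y
    return (ub + au) - 2 * math.sqrt(au * (ub - y))

for x in (0.3, 5.0, 9.8):  # lower zone, linear zone, upper zone
    print(abs(inverse(forward(x)) - x) < 1e-12)  # True, True, True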
BDNYC/astrodbkit | astrodbkit/astrodb.py | https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrodb.py#L1416-L1495 | def plot_spectrum(self, spectrum_id, table='spectra', column='spectrum', overplot=False, color='b', norm=False):
"""
Plots a spectrum from the given column and table
Parameters
----------
spectrum_id: int
The id from the table of the spectrum to plot.
overplot: bool
Overplot the spectrum
table: str
The table from which the plot is being made
column: str
The column with SPECTRUM data type to plot
color: str
The color used for the data
norm: bool, sequence
True or (min,max) wavelength range in which to normalize the spectrum
"""
# TODO: Look into axes number formats. As is it will sometimes not display any numbers for wavelength
i = self.query("SELECT * FROM {} WHERE id={}".format(table, spectrum_id), fetch='one', fmt='dict')
if i:
try:
spec = scrub(i[column].data, units=False)
w, f = spec[:2]
try:
e = spec[2]
except:
e = ''
# Draw the axes and add the metadata
if not overplot:
fig, ax = plt.subplots()
plt.rc('text', usetex=False)
ax.set_yscale('log', nonposy='clip')
plt.figtext(0.15, 0.88, '\n'.join(['{}: {}'.format(k, v) for k, v in i.items() if k != column]), \
verticalalignment='top')
try:
ax.set_xlabel(r'$\lambda$ [{}]'.format(i.get('wavelength_units')))
ax.set_ylabel(r'$F_\lambda$ [{}]'.format(i.get('flux_units')))
except:
pass
ax.legend(loc=8, frameon=False)
else:
ax = plt.gca()
# Normalize the data
if norm:
try:
if isinstance(norm, bool): norm = (min(w), max(w))
# Normalize to the specified window
norm_mask = np.logical_and(w >= norm[0], w <= norm[1])
C = 1. / np.trapz(f[norm_mask], x=w[norm_mask])
f *= C
try:
e *= C
except:
pass
except:
print('Could not normalize.')
# Plot the data
ax.loglog(w, f, c=color, label='spec_id: {}'.format(i['id']))
X, Y = plt.xlim(), plt.ylim()
try:
ax.fill_between(w, f - e, f + e, color=color, alpha=0.3), ax.set_xlim(X), ax.set_ylim(Y)
except:
print('No uncertainty array for spectrum {}'.format(spectrum_id))
plt.ion()
except IOError:
print("Could not plot spectrum {}".format(spectrum_id))
plt.close()
else:
print("No spectrum {} in the {} table.".format(spectrum_id, table.upper())) | [
"def",
"plot_spectrum",
"(",
"self",
",",
"spectrum_id",
",",
"table",
"=",
"'spectra'",
",",
"column",
"=",
"'spectrum'",
",",
"overplot",
"=",
"False",
",",
"color",
"=",
"'b'",
",",
"norm",
"=",
"False",
")",
":",
"# TODO: Look into axes number formats. As is it will sometimes not display any numbers for wavelength",
"i",
"=",
"self",
".",
"query",
"(",
"\"SELECT * FROM {} WHERE id={}\"",
".",
"format",
"(",
"table",
",",
"spectrum_id",
")",
",",
"fetch",
"=",
"'one'",
",",
"fmt",
"=",
"'dict'",
")",
"if",
"i",
":",
"try",
":",
"spec",
"=",
"scrub",
"(",
"i",
"[",
"column",
"]",
".",
"data",
",",
"units",
"=",
"False",
")",
"w",
",",
"f",
"=",
"spec",
"[",
":",
"2",
"]",
"try",
":",
"e",
"=",
"spec",
"[",
"2",
"]",
"except",
":",
"e",
"=",
"''",
"# Draw the axes and add the metadata",
"if",
"not",
"overplot",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"plt",
".",
"rc",
"(",
"'text'",
",",
"usetex",
"=",
"False",
")",
"ax",
".",
"set_yscale",
"(",
"'log'",
",",
"nonposy",
"=",
"'clip'",
")",
"plt",
".",
"figtext",
"(",
"0.15",
",",
"0.88",
",",
"'\\n'",
".",
"join",
"(",
"[",
"'{}: {}'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"i",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"column",
"]",
")",
",",
"verticalalignment",
"=",
"'top'",
")",
"try",
":",
"ax",
".",
"set_xlabel",
"(",
"r'$\\lambda$ [{}]'",
".",
"format",
"(",
"i",
".",
"get",
"(",
"'wavelength_units'",
")",
")",
")",
"ax",
".",
"set_ylabel",
"(",
"r'$F_\\lambda$ [{}]'",
".",
"format",
"(",
"i",
".",
"get",
"(",
"'flux_units'",
")",
")",
")",
"except",
":",
"pass",
"ax",
".",
"legend",
"(",
"loc",
"=",
"8",
",",
"frameon",
"=",
"False",
")",
"else",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# Normalize the data",
"if",
"norm",
":",
"try",
":",
"if",
"isinstance",
"(",
"norm",
",",
"bool",
")",
":",
"norm",
"=",
"(",
"min",
"(",
"w",
")",
",",
"max",
"(",
"w",
")",
")",
"# Normalize to the specified window",
"norm_mask",
"=",
"np",
".",
"logical_and",
"(",
"w",
">=",
"norm",
"[",
"0",
"]",
",",
"w",
"<=",
"norm",
"[",
"1",
"]",
")",
"C",
"=",
"1.",
"/",
"np",
".",
"trapz",
"(",
"f",
"[",
"norm_mask",
"]",
",",
"x",
"=",
"w",
"[",
"norm_mask",
"]",
")",
"f",
"*=",
"C",
"try",
":",
"e",
"*=",
"C",
"except",
":",
"pass",
"except",
":",
"print",
"(",
"'Could not normalize.'",
")",
"# Plot the data",
"ax",
".",
"loglog",
"(",
"w",
",",
"f",
",",
"c",
"=",
"color",
",",
"label",
"=",
"'spec_id: {}'",
".",
"format",
"(",
"i",
"[",
"'id'",
"]",
")",
")",
"X",
",",
"Y",
"=",
"plt",
".",
"xlim",
"(",
")",
",",
"plt",
".",
"ylim",
"(",
")",
"try",
":",
"ax",
".",
"fill_between",
"(",
"w",
",",
"f",
"-",
"e",
",",
"f",
"+",
"e",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
"0.3",
")",
",",
"ax",
".",
"set_xlim",
"(",
"X",
")",
",",
"ax",
".",
"set_ylim",
"(",
"Y",
")",
"except",
":",
"print",
"(",
"'No uncertainty array for spectrum {}'",
".",
"format",
"(",
"spectrum_id",
")",
")",
"plt",
".",
"ion",
"(",
")",
"except",
"IOError",
":",
"print",
"(",
"\"Could not plot spectrum {}\"",
".",
"format",
"(",
"spectrum_id",
")",
")",
"plt",
".",
"close",
"(",
")",
"else",
":",
"print",
"(",
"\"No spectrum {} in the {} table.\"",
".",
"format",
"(",
"spectrum_id",
",",
"table",
".",
"upper",
"(",
")",
")",
")"
] | Plots a spectrum from the given column and table
Parameters
----------
spectrum_id: int
The id from the table of the spectrum to plot.
overplot: bool
Overplot the spectrum
table: str
The table from which the plot is being made
column: str
The column with SPECTRUM data type to plot
color: str
The color used for the data
norm: bool, sequence
True or (min,max) wavelength range in which to normalize the spectrum | [
"Plots",
"a",
"spectrum",
"from",
"the",
"given",
"column",
"and",
"table"
] | python | train |
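The norm branch above rescales the flux so it integrates to one over the chosen window; the same step in isolation, on synthetic arrays:

import numpy as np

w = np.linspace(1.0, 2.5, 500)       # synthetic wavelength grid
f = np.exp(-(w - 1.6) ** 2 / 0.02)   # synthetic flux
norm = (1.2, 2.0)                    # normalization window

norm_mask = np.logical_and(w >= norm[0], w <= norm[1])
C = 1.0 / np.trapz(f[norm_mask], x=w[norm_mask])
f = f * C
print(np.trapz(f[norm_mask], x=w[norm_mask]))  # ~1.0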
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/boosted_trees_classifier.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/boosted_trees_classifier.py#L450-L638 | def create(dataset, target,
features=None, max_iterations=10,
validation_set='auto',
class_weights = None,
max_depth=6, step_size=0.3,
min_loss_reduction=0.0, min_child_weight=0.1,
row_subsample=1.0, column_subsample=1.0,
verbose=True,
random_seed = None,
metric='auto',
**kwargs):
"""
Create a (binary or multi-class) classifier model of type
:class:`~turicreate.boosted_trees_classifier.BoostedTreesClassifier` using
gradient boosted trees (sometimes known as GBMs).
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
target : str
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable values.
For example, a target variable with 'cat', 'dog', and 'foosa' as possible
        values is mapped to 0, 1, and 2, respectively.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, which uses all columns in the SFrame ``dataset``
        excepting the target column.
max_iterations : int, optional
The maximum number of iterations for boosting. Each iteration results
in the creation of an extra tree.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. This is computed once per full iteration. Large
differences in model accuracy between the training data and validation
data is indicative of overfitting. The default value is 'auto'.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If provided, the dictionary must contain a key for each class
label. The value can be any positive number greater than 1e-20. Weights
are interpreted as relative to each other. So setting the weights to be
2.0 for the positive class and 1.0 for the negative class has the same
effect as setting them to be 20.0 and 10.0, respectively. If set to
`None`, all classes are taken to have weight 1.0. The `auto` mode sets
the class weight to be inversely proportional to the number of examples
in the training data with the given class.
max_depth : float, optional
Maximum depth of a tree. Must be at least 1.
step_size : float, [0,1], optional
        Step size (shrinkage) used in each update to prevent overfitting. It
shrinks the prediction of each weak learner to make the boosting
process more conservative. The smaller the step size, the more conservative
the algorithm will be. Smaller step_size work well when
`max_iterations` is large.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition/split a
node during the tree learning phase. Larger (more positive) values
can help prevent overfitting by avoiding splits that do not
sufficiently reduce the loss function.
min_child_weight : float, optional (non-negative)
Controls the minimum weight of each leaf node. Larger values result in
more conservative tree learning and help prevent overfitting.
Formally, this is minimum sum of instance weights (hessians) in each
node. If the tree learning algorithm results in a leaf node with the
sum of instance weights less than `min_child_weight`, tree building
will terminate.
row_subsample : float, [0,1], optional
Subsample the ratio of the training set in each iteration of tree
construction. This is called the bagging trick and can usually help
prevent overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the examples (rows) to grow each tree.
column_subsample : float, [0,1], optional
Subsample ratio of the columns in each iteration of tree
construction. Like row_subsample, this can also help prevent
model overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the columns to grow each tree.
verbose : boolean, optional
Print progress information during training (if set to true).
random_seed : int, optional
        Seeds random operations such as column and row subsampling, such that
        results are reproducible.
metric : str or list[str], optional
Performance metric(s) that are tracked during training. When specified,
the progress table will display the tracked metric(s) on training and
validation set.
Supported metrics are: {'accuracy', 'auc', 'log_loss'}
kwargs : dict, optional
Additional arguments for training the model.
- ``early_stopping_rounds`` : int, default None
If the validation metric does not improve after <early_stopping_rounds>,
stop training and return the best model.
If multiple metrics are being tracked, the last one is used.
- ``model_checkpoint_path`` : str, default None
If specified, checkpoint the model training to the given path every n iterations,
where n is specified by ``model_checkpoint_interval``.
For instance, if `model_checkpoint_interval` is 5, and `model_checkpoint_path` is
set to ``/tmp/model_tmp``, the checkpoints will be saved into
``/tmp/model_tmp/model_checkpoint_5``, ``/tmp/model_tmp/model_checkpoint_10``, ... etc.
Training can be resumed by setting ``resume_from_checkpoint`` to one of these checkpoints.
- ``model_checkpoint_interval`` : int, default 5
          If model_checkpoint_path is specified,
save the model to the given path every n iterations.
- ``resume_from_checkpoint`` : str, default None
Continues training from a model checkpoint. The model must take
          exactly the same training data as the checkpointed model.
Returns
-------
out : BoostedTreesClassifier
A trained gradient boosted trees model for classifications tasks.
References
----------
- `Wikipedia - Gradient tree boosting
<http://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting>`_
- `Trevor Hastie's slides on Boosted Trees and Random Forest
<http://jessica2.msri.org/attachments/10778/10778-boost.pdf>`_
See Also
--------
BoostedTreesClassifier, turicreate.logistic_classifier.LogisticClassifier, turicreate.svm_classifier.SVMClassifier
Examples
--------
.. sourcecode:: python
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = turicreate.SFrame.read_csv(url)
>>> train, test = data.random_split(0.8)
>>> model = turicreate.boosted_trees_classifier.create(train, target='label')
>>> predictions = model.classify(test)
>>> results = model.evaluate(test)
"""
if random_seed is not None:
kwargs['random_seed'] = random_seed
if 'model_checkpoint_path' in kwargs:
kwargs['model_checkpoint_path'] = _make_internal_url(kwargs['model_checkpoint_path'])
if 'resume_from_checkpoint' in kwargs:
kwargs['resume_from_checkpoint'] = _make_internal_url(kwargs['resume_from_checkpoint'])
model = _sl.create(dataset = dataset,
target = target,
features = features,
model_name = 'boosted_trees_classifier',
max_iterations = max_iterations,
validation_set = validation_set,
class_weights = class_weights,
max_depth = max_depth,
step_size = step_size,
min_loss_reduction = min_loss_reduction,
min_child_weight = min_child_weight,
row_subsample = row_subsample,
column_subsample = column_subsample,
verbose = verbose,
metric = metric,
**kwargs)
return BoostedTreesClassifier(model.__proxy__) | [
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"max_iterations",
"=",
"10",
",",
"validation_set",
"=",
"'auto'",
",",
"class_weights",
"=",
"None",
",",
"max_depth",
"=",
"6",
",",
"step_size",
"=",
"0.3",
",",
"min_loss_reduction",
"=",
"0.0",
",",
"min_child_weight",
"=",
"0.1",
",",
"row_subsample",
"=",
"1.0",
",",
"column_subsample",
"=",
"1.0",
",",
"verbose",
"=",
"True",
",",
"random_seed",
"=",
"None",
",",
"metric",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"random_seed",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'random_seed'",
"]",
"=",
"random_seed",
"if",
"'model_checkpoint_path'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'model_checkpoint_path'",
"]",
"=",
"_make_internal_url",
"(",
"kwargs",
"[",
"'model_checkpoint_path'",
"]",
")",
"if",
"'resume_from_checkpoint'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'resume_from_checkpoint'",
"]",
"=",
"_make_internal_url",
"(",
"kwargs",
"[",
"'resume_from_checkpoint'",
"]",
")",
"model",
"=",
"_sl",
".",
"create",
"(",
"dataset",
"=",
"dataset",
",",
"target",
"=",
"target",
",",
"features",
"=",
"features",
",",
"model_name",
"=",
"'boosted_trees_classifier'",
",",
"max_iterations",
"=",
"max_iterations",
",",
"validation_set",
"=",
"validation_set",
",",
"class_weights",
"=",
"class_weights",
",",
"max_depth",
"=",
"max_depth",
",",
"step_size",
"=",
"step_size",
",",
"min_loss_reduction",
"=",
"min_loss_reduction",
",",
"min_child_weight",
"=",
"min_child_weight",
",",
"row_subsample",
"=",
"row_subsample",
",",
"column_subsample",
"=",
"column_subsample",
",",
"verbose",
"=",
"verbose",
",",
"metric",
"=",
"metric",
",",
"*",
"*",
"kwargs",
")",
"return",
"BoostedTreesClassifier",
"(",
"model",
".",
"__proxy__",
")"
] | Create a (binary or multi-class) classifier model of type
:class:`~turicreate.boosted_trees_classifier.BoostedTreesClassifier` using
gradient boosted trees (sometimes known as GBMs).
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
target : str
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable values.
For example, a target variable with 'cat', 'dog', and 'foosa' as possible
        values is mapped to 0, 1, and 2, respectively.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, which uses all columns in the SFrame ``dataset``
        excepting the target column.
max_iterations : int, optional
The maximum number of iterations for boosting. Each iteration results
in the creation of an extra tree.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. This is computed once per full iteration. Large
differences in model accuracy between the training data and validation
data is indicative of overfitting. The default value is 'auto'.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If provided, the dictionary must contain a key for each class
label. The value can be any positive number greater than 1e-20. Weights
are interpreted as relative to each other. So setting the weights to be
2.0 for the positive class and 1.0 for the negative class has the same
effect as setting them to be 20.0 and 10.0, respectively. If set to
`None`, all classes are taken to have weight 1.0. The `auto` mode sets
the class weight to be inversely proportional to the number of examples
in the training data with the given class.
max_depth : float, optional
Maximum depth of a tree. Must be at least 1.
step_size : float, [0,1], optional
        Step size (shrinkage) used in each update to prevent overfitting. It
shrinks the prediction of each weak learner to make the boosting
process more conservative. The smaller the step size, the more conservative
the algorithm will be. Smaller step_size work well when
`max_iterations` is large.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition/split a
node during the tree learning phase. Larger (more positive) values
can help prevent overfitting by avoiding splits that do not
sufficiently reduce the loss function.
min_child_weight : float, optional (non-negative)
Controls the minimum weight of each leaf node. Larger values result in
more conservative tree learning and help prevent overfitting.
Formally, this is minimum sum of instance weights (hessians) in each
node. If the tree learning algorithm results in a leaf node with the
sum of instance weights less than `min_child_weight`, tree building
will terminate.
row_subsample : float, [0,1], optional
Subsample the ratio of the training set in each iteration of tree
construction. This is called the bagging trick and can usually help
prevent overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the examples (rows) to grow each tree.
column_subsample : float, [0,1], optional
Subsample ratio of the columns in each iteration of tree
construction. Like row_subsample, this can also help prevent
model overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the columns to grow each tree.
verbose : boolean, optional
Print progress information during training (if set to true).
random_seed : int, optional
        Seeds random operations such as column and row subsampling, such that
        results are reproducible.
metric : str or list[str], optional
Performance metric(s) that are tracked during training. When specified,
the progress table will display the tracked metric(s) on training and
validation set.
Supported metrics are: {'accuracy', 'auc', 'log_loss'}
kwargs : dict, optional
Additional arguments for training the model.
- ``early_stopping_rounds`` : int, default None
If the validation metric does not improve after <early_stopping_rounds>,
stop training and return the best model.
If multiple metrics are being tracked, the last one is used.
- ``model_checkpoint_path`` : str, default None
If specified, checkpoint the model training to the given path every n iterations,
where n is specified by ``model_checkpoint_interval``.
For instance, if `model_checkpoint_interval` is 5, and `model_checkpoint_path` is
set to ``/tmp/model_tmp``, the checkpoints will be saved into
``/tmp/model_tmp/model_checkpoint_5``, ``/tmp/model_tmp/model_checkpoint_10``, ... etc.
Training can be resumed by setting ``resume_from_checkpoint`` to one of these checkpoints.
- ``model_checkpoint_interval`` : int, default 5
          If model_checkpoint_path is specified,
save the model to the given path every n iterations.
- ``resume_from_checkpoint`` : str, default None
Continues training from a model checkpoint. The model must take
          exactly the same training data as the checkpointed model.
Returns
-------
out : BoostedTreesClassifier
A trained gradient boosted trees model for classifications tasks.
References
----------
- `Wikipedia - Gradient tree boosting
<http://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting>`_
- `Trevor Hastie's slides on Boosted Trees and Random Forest
<http://jessica2.msri.org/attachments/10778/10778-boost.pdf>`_
See Also
--------
BoostedTreesClassifier, turicreate.logistic_classifier.LogisticClassifier, turicreate.svm_classifier.SVMClassifier
Examples
--------
.. sourcecode:: python
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = turicreate.SFrame.read_csv(url)
>>> train, test = data.random_split(0.8)
>>> model = turicreate.boosted_trees_classifier.create(train, target='label')
>>> predictions = model.classify(test)
>>> results = model.evaluate(test) | [
"Create",
"a",
"(",
"binary",
"or",
"multi",
"-",
"class",
")",
"classifier",
"model",
"of",
"type",
":",
"class",
":",
"~turicreate",
".",
"boosted_trees_classifier",
".",
"BoostedTreesClassifier",
"using",
"gradient",
"boosted",
"trees",
"(",
"sometimes",
"known",
"as",
"GBMs",
")",
"."
] | python | train |
pywbem/pywbem | attic/cim_provider.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider.py#L661-L676 | def MI_deleteInstance(self,
env,
instanceName):
# pylint: disable=invalid-name
"""Delete a CIM instance
Implements the WBEM operation DeleteInstance in terms
of the delete_instance method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider MI_deleteInstance called...')
self.delete_instance(env=env, instance_name=instanceName)
logger.log_debug('CIMProvider MI_deleteInstance returning') | [
"def",
"MI_deleteInstance",
"(",
"self",
",",
"env",
",",
"instanceName",
")",
":",
"# pylint: disable=invalid-name",
"logger",
"=",
"env",
".",
"get_logger",
"(",
")",
"logger",
".",
"log_debug",
"(",
"'CIMProvider MI_deleteInstance called...'",
")",
"self",
".",
"delete_instance",
"(",
"env",
"=",
"env",
",",
"instance_name",
"=",
"instanceName",
")",
"logger",
".",
"log_debug",
"(",
"'CIMProvider MI_deleteInstance returning'",
")"
] | Delete a CIM instance
Implements the WBEM operation DeleteInstance in terms
of the delete_instance method. A derived class will not normally
override this method. | [
"Delete",
"a",
"CIM",
"instance"
] | python | train |
googleapis/dialogflow-python-client-v2 | dialogflow_v2/gapic/session_entity_types_client.py | https://github.com/googleapis/dialogflow-python-client-v2/blob/8c9c8709222efe427b76c9c8fcc04a0c4a0760b5/dialogflow_v2/gapic/session_entity_types_client.py#L484-L537 | def delete_session_entity_type(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes the specified session entity type.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.SessionEntityTypesClient()
>>>
>>> name = client.session_entity_type_path('[PROJECT]', '[SESSION]', '[ENTITY_TYPE]')
>>>
>>> client.delete_session_entity_type(name)
Args:
name (str): Required. The name of the entity type to delete. Format:
``projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type
Display Name>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_session_entity_type' not in self._inner_api_calls:
self._inner_api_calls[
'delete_session_entity_type'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_session_entity_type,
default_retry=self._method_configs[
'DeleteSessionEntityType'].retry,
default_timeout=self._method_configs[
'DeleteSessionEntityType'].timeout,
client_info=self._client_info,
)
request = session_entity_type_pb2.DeleteSessionEntityTypeRequest(
name=name, )
self._inner_api_calls['delete_session_entity_type'](
request, retry=retry, timeout=timeout, metadata=metadata) | [
"def",
"delete_session_entity_type",
"(",
"self",
",",
"name",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"'delete_session_entity_type'",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"'delete_session_entity_type'",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"delete_session_entity_type",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"'DeleteSessionEntityType'",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"'DeleteSessionEntityType'",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"session_entity_type_pb2",
".",
"DeleteSessionEntityTypeRequest",
"(",
"name",
"=",
"name",
",",
")",
"self",
".",
"_inner_api_calls",
"[",
"'delete_session_entity_type'",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
] | Deletes the specified session entity type.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.SessionEntityTypesClient()
>>>
>>> name = client.session_entity_type_path('[PROJECT]', '[SESSION]', '[ENTITY_TYPE]')
>>>
>>> client.delete_session_entity_type(name)
Args:
name (str): Required. The name of the entity type to delete. Format:
``projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type
Display Name>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Deletes",
"the",
"specified",
"session",
"entity",
"type",
"."
] | python | train |
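A hedged usage sketch for the call above, beyond the docstring's own example; the project/session placeholders are illustrative, and per the docstring, passing retry=None disables retries for this one call.

import dialogflow_v2
from google.api_core import exceptions

client = dialogflow_v2.SessionEntityTypesClient()
name = client.session_entity_type_path('[PROJECT]', '[SESSION]', '[ENTITY_TYPE]')
try:
    # Disable retries and cap this single attempt at 30 seconds.
    client.delete_session_entity_type(name, retry=None, timeout=30.0)
except exceptions.GoogleAPICallError as exc:
    print('delete failed:', exc)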
openstack/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py#L317-L351 | def collect_state(self, configurations):
"""Collect state from this helper.
A set of attributes which summarizes the state of the routers and
configurations managed by this config agent.
:param configurations: dict of configuration values
:return dict of updated configuration values
"""
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
num_hd_routers = collections.defaultdict(int)
for ri in router_infos:
ex_gw_port = ri.router.get('gw_port')
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(
bc.constants.INTERFACE_KEY, []))
num_floating_ips += len(ri.router.get(
bc.constants.FLOATINGIP_KEY, []))
hd = ri.router['hosting_device']
if hd:
num_hd_routers[hd['id']] += 1
routers_per_hd = dict((hd_id, {'routers': num})
for hd_id, num in num_hd_routers.items())
non_responding = self._dev_status.get_backlogged_hosting_devices()
configurations['total routers'] = num_routers
configurations['total ex_gw_ports'] = num_ex_gw_ports
configurations['total interfaces'] = num_interfaces
configurations['total floating_ips'] = num_floating_ips
configurations['hosting_devices'] = routers_per_hd
configurations['non_responding_hosting_devices'] = non_responding
return configurations | [
"def",
"collect_state",
"(",
"self",
",",
"configurations",
")",
":",
"num_ex_gw_ports",
"=",
"0",
"num_interfaces",
"=",
"0",
"num_floating_ips",
"=",
"0",
"router_infos",
"=",
"self",
".",
"router_info",
".",
"values",
"(",
")",
"num_routers",
"=",
"len",
"(",
"router_infos",
")",
"num_hd_routers",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"ri",
"in",
"router_infos",
":",
"ex_gw_port",
"=",
"ri",
".",
"router",
".",
"get",
"(",
"'gw_port'",
")",
"if",
"ex_gw_port",
":",
"num_ex_gw_ports",
"+=",
"1",
"num_interfaces",
"+=",
"len",
"(",
"ri",
".",
"router",
".",
"get",
"(",
"bc",
".",
"constants",
".",
"INTERFACE_KEY",
",",
"[",
"]",
")",
")",
"num_floating_ips",
"+=",
"len",
"(",
"ri",
".",
"router",
".",
"get",
"(",
"bc",
".",
"constants",
".",
"FLOATINGIP_KEY",
",",
"[",
"]",
")",
")",
"hd",
"=",
"ri",
".",
"router",
"[",
"'hosting_device'",
"]",
"if",
"hd",
":",
"num_hd_routers",
"[",
"hd",
"[",
"'id'",
"]",
"]",
"+=",
"1",
"routers_per_hd",
"=",
"dict",
"(",
"(",
"hd_id",
",",
"{",
"'routers'",
":",
"num",
"}",
")",
"for",
"hd_id",
",",
"num",
"in",
"num_hd_routers",
".",
"items",
"(",
")",
")",
"non_responding",
"=",
"self",
".",
"_dev_status",
".",
"get_backlogged_hosting_devices",
"(",
")",
"configurations",
"[",
"'total routers'",
"]",
"=",
"num_routers",
"configurations",
"[",
"'total ex_gw_ports'",
"]",
"=",
"num_ex_gw_ports",
"configurations",
"[",
"'total interfaces'",
"]",
"=",
"num_interfaces",
"configurations",
"[",
"'total floating_ips'",
"]",
"=",
"num_floating_ips",
"configurations",
"[",
"'hosting_devices'",
"]",
"=",
"routers_per_hd",
"configurations",
"[",
"'non_responding_hosting_devices'",
"]",
"=",
"non_responding",
"return",
"configurations"
] | Collect state from this helper.
A set of attributes which summarizes the state of the routers and
configurations managed by this config agent.
:param configurations: dict of configuration values
:return dict of updated configuration values | [
"Collect",
"state",
"from",
"this",
"helper",
"."
] | python | train |
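The per-hosting-device tally in collect_state() can be illustrated standalone; the router dicts below are made-up fixtures shaped like the fields the method reads, not real Neutron payloads.

import collections

routers = [
    {'gw_port': {'id': 'p1'}, 'hosting_device': {'id': 'hd-1'}},
    {'gw_port': None, 'hosting_device': {'id': 'hd-1'}},
    {'gw_port': {'id': 'p2'}, 'hosting_device': {'id': 'hd-2'}},
]

num_ex_gw_ports = 0
num_hd_routers = collections.defaultdict(int)
for r in routers:
    if r.get('gw_port'):          # only routers with an external gateway
        num_ex_gw_ports += 1
    if r['hosting_device']:
        num_hd_routers[r['hosting_device']['id']] += 1

routers_per_hd = dict((hd_id, {'routers': n})
                      for hd_id, n in num_hd_routers.items())
print(num_ex_gw_ports)  # 2
print(routers_per_hd)   # {'hd-1': {'routers': 2}, 'hd-2': {'routers': 1}}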
leancloud/python-sdk | leancloud/query.py | https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/query.py#L636-L648 | def within_kilometers(self, key, point, max_distance, min_distance=None):
"""
Add a query condition that restricts results to objects whose value
for the given field lies within a given distance of a point.
:param key: name of the field to query on
:param point: the geographic point to query against
:param max_distance: maximum distance (kilometers)
:param min_distance: minimum distance (kilometers)
:rtype: Query
"""
if min_distance is not None:
min_distance = min_distance / 6371.0
return self.within_radians(key, point, max_distance / 6371.0, min_distance) | [
"def",
"within_kilometers",
"(",
"self",
",",
"key",
",",
"point",
",",
"max_distance",
",",
"min_distance",
"=",
"None",
")",
":",
"if",
"min_distance",
"is",
"not",
"None",
":",
"min_distance",
"=",
"min_distance",
"/",
"6371.0",
"return",
"self",
".",
"within_radians",
"(",
"key",
",",
"point",
",",
"max_distance",
"/",
"6371.0",
",",
"min_distance",
")"
] | Add a query condition that restricts results to objects whose value
for the given field lies within a given distance of a point.
:param key: name of the field to query on
:param point: the geographic point to query against
:param max_distance: maximum distance (kilometers)
:param min_distance: minimum distance (kilometers)
:rtype: Query | [
"增加查询条件,限制返回结果指定字段值的位置在某点的一段距离之内。"
] | python | train |
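A hedged usage sketch; it assumes the SDK was initialized with leancloud.init() elsewhere and that a 'Place' class stores a GeoPoint in a field named 'location'.

import leancloud

point = leancloud.GeoPoint(39.9, 116.4)  # latitude, longitude
query = leancloud.Query('Place')
# Places between 1 km and 5 km from the point; the method converts
# kilometers to radians by dividing by the Earth radius (6371 km).
query.within_kilometers('location', point, 5, min_distance=1)
places = query.find()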
b3j0f/aop | b3j0f/aop/advice/core.py | https://github.com/b3j0f/aop/blob/22b9ba335d103edd929c25eb6dbb94037d3615bc/b3j0f/aop/advice/core.py#L452-L471 | def _unweave(target, advices, pointcut, ctx, depth, depth_predicate):
"""Unweave deeply advices in target."""
# if weaving has to be done
if pointcut is None or pointcut(target):
# do something only if target is intercepted
if is_intercepted(target):
_remove_advices(target=target, advices=advices, ctx=ctx)
# search inside the target
if depth > 0: # for an object or a class, weave on methods
# get base ctx
_base_ctx = None
if ctx is not None:
_base_ctx = base_ctx(ctx)
for _, member in getmembers(target, depth_predicate):
_unweave(
target=member, advices=advices, pointcut=pointcut,
depth=depth - 1, depth_predicate=depth_predicate, ctx=_base_ctx
) | [
"def",
"_unweave",
"(",
"target",
",",
"advices",
",",
"pointcut",
",",
"ctx",
",",
"depth",
",",
"depth_predicate",
")",
":",
"# if weaving has to be done",
"if",
"pointcut",
"is",
"None",
"or",
"pointcut",
"(",
"target",
")",
":",
"# do something only if target is intercepted",
"if",
"is_intercepted",
"(",
"target",
")",
":",
"_remove_advices",
"(",
"target",
"=",
"target",
",",
"advices",
"=",
"advices",
",",
"ctx",
"=",
"ctx",
")",
"# search inside the target",
"if",
"depth",
">",
"0",
":",
"# for an object or a class, weave on methods",
"# get base ctx",
"_base_ctx",
"=",
"None",
"if",
"ctx",
"is",
"not",
"None",
":",
"_base_ctx",
"=",
"base_ctx",
"(",
"ctx",
")",
"for",
"_",
",",
"member",
"in",
"getmembers",
"(",
"target",
",",
"depth_predicate",
")",
":",
"_unweave",
"(",
"target",
"=",
"member",
",",
"advices",
"=",
"advices",
",",
"pointcut",
"=",
"pointcut",
",",
"depth",
"=",
"depth",
"-",
"1",
",",
"depth_predicate",
"=",
"depth_predicate",
",",
"ctx",
"=",
"_base_ctx",
")"
] | Unweave advices deeply from the target. | [
"Unweave",
"deeply",
"advices",
"in",
"target",
"."
] | python | train |
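For context, a hedged sketch of the public weave/unweave pair that this private helper serves; the advice function is invented, and the exact import path and signatures are assumptions based on this package's layout.

from b3j0f.aop import weave, unweave

def trace(joinpoint):
    # Runs around every intercepted call.
    print('before call')
    return joinpoint.proceed()

def compute(x):
    return x * 2

weave(compute, advices=trace)
compute(3)        # advice runs, then the original body returns 6
unweave(compute)  # delegates to _unweave; the advices are removed
compute(3)        # plain call again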
yyuu/botornado | boto/cloudfront/distribution.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/cloudfront/distribution.py#L403-L414 | def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs | [
"def",
"get_objects",
"(",
"self",
")",
":",
"bucket",
"=",
"self",
".",
"_get_bucket",
"(",
")",
"objs",
"=",
"[",
"]",
"for",
"key",
"in",
"bucket",
":",
"objs",
".",
"append",
"(",
"key",
")",
"return",
"objs"
] | Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects | [
"Return",
"a",
"list",
"of",
"all",
"content",
"objects",
"in",
"this",
"distribution",
".",
":",
"rtype",
":",
"list",
"of",
":",
"class",
":",
"boto",
".",
"cloudfront",
".",
"object",
".",
"Object",
":",
"return",
":",
"The",
"content",
"objects"
] | python | train |
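A hedged listing sketch with legacy boto (not boto3); the distribution id is a placeholder and credentials are assumed to come from the environment or boto config.

import boto

cf = boto.connect_cloudfront()
dist = cf.get_distribution_info('EDFDVBD6EXAMPLE')
for key in dist.get_objects():  # iterates the origin bucket's keys
    print(key.name)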
nion-software/nionswift-io | nionswift_plugin/TIFF_IO/tifffile.py | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9851-L9879 | def clean_offsetscounts(offsets, counts):
"""Return cleaned offsets and byte counts.
Remove zero offsets and counts.
Use to sanitize StripOffsets and StripByteCounts tag values.
"""
# TODO: cythonize this
offsets = list(offsets)
counts = list(counts)
size = len(offsets)
if size != len(counts):
raise ValueError('StripOffsets and StripByteCounts mismatch')
j = 0
for i, (o, b) in enumerate(zip(offsets, counts)):
if b > 0:
if o > 0:
if i > j:
offsets[j] = o
counts[j] = b
j += 1
continue
raise ValueError('invalid offset')
log.warning('clean_offsetscounts: empty bytecount')
if size == j:
return offsets, counts
if j == 0:
return [offsets[0]], [counts[0]]
return offsets[:j], counts[:j] | [
"def",
"clean_offsetscounts",
"(",
"offsets",
",",
"counts",
")",
":",
"# TODO: cythonize this",
"offsets",
"=",
"list",
"(",
"offsets",
")",
"counts",
"=",
"list",
"(",
"counts",
")",
"size",
"=",
"len",
"(",
"offsets",
")",
"if",
"size",
"!=",
"len",
"(",
"counts",
")",
":",
"raise",
"ValueError",
"(",
"'StripOffsets and StripByteCounts mismatch'",
")",
"j",
"=",
"0",
"for",
"i",
",",
"(",
"o",
",",
"b",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"offsets",
",",
"counts",
")",
")",
":",
"if",
"b",
">",
"0",
":",
"if",
"o",
">",
"0",
":",
"if",
"i",
">",
"j",
":",
"offsets",
"[",
"j",
"]",
"=",
"o",
"counts",
"[",
"j",
"]",
"=",
"b",
"j",
"+=",
"1",
"continue",
"raise",
"ValueError",
"(",
"'invalid offset'",
")",
"log",
".",
"warning",
"(",
"'clean_offsetscounts: empty bytecount'",
")",
"if",
"size",
"==",
"len",
"(",
"offsets",
")",
":",
"return",
"offsets",
",",
"counts",
"if",
"j",
"==",
"0",
":",
"return",
"[",
"offsets",
"[",
"0",
"]",
"]",
",",
"[",
"counts",
"[",
"0",
"]",
"]",
"return",
"offsets",
"[",
":",
"j",
"]",
",",
"counts",
"[",
":",
"j",
"]"
] | Return cleaned offsets and byte counts.
Remove zero offsets and counts.
Use to sanitize StripOffsets and StripByteCounts tag values. | [
"Return",
"cleaned",
"offsets",
"and",
"byte",
"counts",
"."
] | python | train |
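A quick check of the sanitizing behavior, assuming the function above and tifffile's module-level log logger are in scope; the strip values are synthetic.

offsets, counts = clean_offsetscounts([8, 0, 1024], [512, 0, 512])
print(offsets, counts)  # [8, 1024] [512, 512]; the empty strip is dropped
# A nonzero bytecount paired with a zero offset raises instead:
# clean_offsetscounts([0], [512])  -> ValueError('invalid offset')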
475Cumulus/TBone | tbone/resources/mongo.py | https://github.com/475Cumulus/TBone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/resources/mongo.py#L229-L240 | def build_sort(self, **kwargs):
''' Break url parameters and turn into sort arguments '''
sort = []
order = kwargs.get('order_by', None)
if order:
if type(order) is list:
order = order[0]
if order[:1] == '-':
sort.append((order[1:], -1))
else:
sort.append((order, 1))
return sort | [
"def",
"build_sort",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"sort",
"=",
"[",
"]",
"order",
"=",
"kwargs",
".",
"get",
"(",
"'order_by'",
",",
"None",
")",
"if",
"order",
":",
"if",
"type",
"(",
"order",
")",
"is",
"list",
":",
"order",
"=",
"order",
"[",
"0",
"]",
"if",
"order",
"[",
":",
"1",
"]",
"==",
"'-'",
":",
"sort",
".",
"append",
"(",
"(",
"order",
"[",
"1",
":",
"]",
",",
"-",
"1",
")",
")",
"else",
":",
"sort",
".",
"append",
"(",
"(",
"order",
",",
"1",
")",
")",
"return",
"sort"
] | Break URL parameters and turn them into sort arguments | [
"Break",
"url",
"parameters",
"and",
"turn",
"into",
"sort",
"arguments"
] | python | train |
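Since build_sort only reads its keyword arguments, the parsing can be exercised directly; passing None for self is just for illustration.

print(build_sort(None, order_by='-created_at'))  # [('created_at', -1)]
print(build_sort(None, order_by=['name']))       # [('name', 1)]
print(build_sort(None))                          # [] (no ordering requested)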
fedora-python/pyp2rpm | pyp2rpm/utils.py | https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/utils.py#L44-L56 | def memoize_by_args(func):
"""Memoizes return value of a func based on args."""
memory = {}
@functools.wraps(func)
def memoized(*args):
if args not in memory.keys():
value = func(*args)
memory[args] = value
return memory[args]
return memoized | [
"def",
"memoize_by_args",
"(",
"func",
")",
":",
"memory",
"=",
"{",
"}",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"memoized",
"(",
"*",
"args",
")",
":",
"if",
"args",
"not",
"in",
"memory",
".",
"keys",
"(",
")",
":",
"value",
"=",
"func",
"(",
"*",
"args",
")",
"memory",
"[",
"args",
"]",
"=",
"value",
"return",
"memory",
"[",
"args",
"]",
"return",
"memoized"
] | Memoizes return value of a func based on args. | [
"Memoizes",
"return",
"value",
"of",
"a",
"func",
"based",
"on",
"args",
"."
] | python | train |
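A minimal usage sketch; note the cache key is the positional-args tuple, so arguments must be hashable and keyword arguments are not supported.

@memoize_by_args
def slow_square(n):
    print('computing', n)
    return n * n

slow_square(4)  # prints 'computing 4' and returns 16
slow_square(4)  # cache hit: returns 16 without recomputing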
yodle/docker-registry-client | docker_registry_client/_BaseClient.py | https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L229-L269 | def _http_response(self, url, method, data=None, content_type=None,
schema=None, **kwargs):
"""url -> full target url
method -> method from requests
data -> request body
kwargs -> url formatting args
"""
if schema is None:
schema = self.schema_2
header = {
'content-type': content_type or 'application/json',
'Accept': schema,
}
# Token specific part. We add the token in the header if necessary
auth = self.auth
token_required = auth.token_required
token = auth.token
desired_scope = auth.desired_scope
scope = auth.scope
if token_required:
if not token or desired_scope != scope:
logger.debug("Getting new token for scope: %s", desired_scope)
auth.get_new_token()
header['Authorization'] = 'Bearer %s' % self.auth.token
if data and not content_type:
data = json.dumps(data)
path = url.format(**kwargs)
logger.debug("%s %s", method.__name__.upper(), path)
response = method(self.host + path,
data=data, headers=header, **self.method_kwargs)
logger.debug("%s %s", response.status_code, response.reason)
response.raise_for_status()
return response | [
"def",
"_http_response",
"(",
"self",
",",
"url",
",",
"method",
",",
"data",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"self",
".",
"schema_2",
"header",
"=",
"{",
"'content-type'",
":",
"content_type",
"or",
"'application/json'",
",",
"'Accept'",
":",
"schema",
",",
"}",
"# Token specific part. We add the token in the header if necessary",
"auth",
"=",
"self",
".",
"auth",
"token_required",
"=",
"auth",
".",
"token_required",
"token",
"=",
"auth",
".",
"token",
"desired_scope",
"=",
"auth",
".",
"desired_scope",
"scope",
"=",
"auth",
".",
"scope",
"if",
"token_required",
":",
"if",
"not",
"token",
"or",
"desired_scope",
"!=",
"scope",
":",
"logger",
".",
"debug",
"(",
"\"Getting new token for scope: %s\"",
",",
"desired_scope",
")",
"auth",
".",
"get_new_token",
"(",
")",
"header",
"[",
"'Authorization'",
"]",
"=",
"'Bearer %s'",
"%",
"self",
".",
"auth",
".",
"token",
"if",
"data",
"and",
"not",
"content_type",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"path",
"=",
"url",
".",
"format",
"(",
"*",
"*",
"kwargs",
")",
"logger",
".",
"debug",
"(",
"\"%s %s\"",
",",
"method",
".",
"__name__",
".",
"upper",
"(",
")",
",",
"path",
")",
"response",
"=",
"method",
"(",
"self",
".",
"host",
"+",
"path",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"header",
",",
"*",
"*",
"self",
".",
"method_kwargs",
")",
"logger",
".",
"debug",
"(",
"\"%s %s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response"
] | url -> full target url
method -> method from requests
data -> request body
kwargs -> url formatting args | [
"url",
"-",
">",
"full",
"target",
"url",
"method",
"-",
">",
"method",
"from",
"requests",
"data",
"-",
">",
"request",
"body",
"kwargs",
"-",
">",
"url",
"formatting",
"args"
] | python | train |
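A hedged sketch of a call site: client is assumed to be an already-configured instance of this class, and /v2/_catalog is the standard registry catalog endpoint.

import requests

response = client._http_response('/v2/_catalog', requests.get)
# raise_for_status() already ran inside the helper, so reaching this
# line means the registry replied with a 2xx status.
print(response.json().get('repositories', []))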
DerwenAI/pytextrank | pytextrank/pytextrank.py | https://github.com/DerwenAI/pytextrank/blob/181ea41375d29922eb96768cf6550e57a77a0c95/pytextrank/pytextrank.py#L414-L429 | def enumerate_chunks (phrase, spacy_nlp):
"""
iterate through the noun phrases
"""
if (len(phrase) > 1):
found = False
text = " ".join([rl.text for rl in phrase])
doc = spacy_nlp(text.strip(), parse=True)
for np in doc.noun_chunks:
if np.text != text:
found = True
yield np.text, find_chunk(phrase, np.text.split(" "))
if not found and all([rl.pos[0] != "v" for rl in phrase]):
yield text, phrase | [
"def",
"enumerate_chunks",
"(",
"phrase",
",",
"spacy_nlp",
")",
":",
"if",
"(",
"len",
"(",
"phrase",
")",
">",
"1",
")",
":",
"found",
"=",
"False",
"text",
"=",
"\" \"",
".",
"join",
"(",
"[",
"rl",
".",
"text",
"for",
"rl",
"in",
"phrase",
"]",
")",
"doc",
"=",
"spacy_nlp",
"(",
"text",
".",
"strip",
"(",
")",
",",
"parse",
"=",
"True",
")",
"for",
"np",
"in",
"doc",
".",
"noun_chunks",
":",
"if",
"np",
".",
"text",
"!=",
"text",
":",
"found",
"=",
"True",
"yield",
"np",
".",
"text",
",",
"find_chunk",
"(",
"phrase",
",",
"np",
".",
"text",
".",
"split",
"(",
"\" \"",
")",
")",
"if",
"not",
"found",
"and",
"all",
"(",
"[",
"rl",
".",
"pos",
"[",
"0",
"]",
"!=",
"\"v\"",
"for",
"rl",
"in",
"phrase",
"]",
")",
":",
"yield",
"text",
",",
"phrase"
] | iterate through the noun phrases | [
"iterate",
"through",
"the",
"noun",
"phrases"
] | python | valid |
zyga/morris | morris/__init__.py | https://github.com/zyga/morris/blob/7cd6da662c8c95b93b5fb8bb25eae8686becf31a/morris/__init__.py#L828-L854 | def assertSignalOrdering(self, *expected_events):
"""
Assert that signals were fired in a specific sequence.
:param expected_events:
A (variadic) list of events describing the signals that were fired
Each element is a 3-tuple (signal, args, kwargs) that describes
the event.
.. note::
If you are using :meth:`assertSignalFired()` then the return value
of that method is a single event that can be passed to this method
"""
expected_order = [self._events_seen.index(event)
for event in expected_events]
actual_order = sorted(expected_order)
self.assertEqual(
expected_order, actual_order,
"\nExpected order of fired signals:\n{}\n"
"Actual order observed:\n{}".format(
"\n".join(
"\t{}: {}".format(i, event)
for i, event in enumerate(expected_events, 1)),
"\n".join(
"\t{}: {}".format(i, event)
for i, event in enumerate(
(self._events_seen[idx] for idx in actual_order), 1)))) | [
"def",
"assertSignalOrdering",
"(",
"self",
",",
"*",
"expected_events",
")",
":",
"expected_order",
"=",
"[",
"self",
".",
"_events_seen",
".",
"index",
"(",
"event",
")",
"for",
"event",
"in",
"expected_events",
"]",
"actual_order",
"=",
"sorted",
"(",
"expected_order",
")",
"self",
".",
"assertEqual",
"(",
"expected_order",
",",
"actual_order",
",",
"\"\\nExpected order of fired signals:\\n{}\\n\"",
"\"Actual order observed:\\n{}\"",
".",
"format",
"(",
"\"\\n\"",
".",
"join",
"(",
"\"\\t{}: {}\"",
".",
"format",
"(",
"i",
",",
"event",
")",
"for",
"i",
",",
"event",
"in",
"enumerate",
"(",
"expected_events",
",",
"1",
")",
")",
",",
"\"\\n\"",
".",
"join",
"(",
"\"\\t{}: {}\"",
".",
"format",
"(",
"i",
",",
"event",
")",
"for",
"i",
",",
"event",
"in",
"enumerate",
"(",
"(",
"self",
".",
"_events_seen",
"[",
"idx",
"]",
"for",
"idx",
"in",
"actual_order",
")",
",",
"1",
")",
")",
")",
")"
] | Assert that signals were fired in a specific sequence.
:param expected_events:
A (variadic) list of events describing the signals that were fired
Each element is a 3-tuple (signal, args, kwargs) that describes
the event.
.. note::
If you are using :meth:`assertSignalFired()` then the return value
of that method is a single event that can be passed to this method | [
"Assert",
"that",
"a",
"signals",
"were",
"fired",
"in",
"a",
"specific",
"sequence",
"."
] | python | train |
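A hedged test sketch: watchSignal() and assertSignalFired() are the companion helpers this docstring mentions, while the two signals are invented for the example.

import morris

@morris.signal
def on_start():
    pass

@morris.signal
def on_stop():
    pass

class OrderingTests(morris.SignalTestCase):
    def test_start_fires_before_stop(self):
        self.watchSignal(on_start)
        self.watchSignal(on_stop)
        on_start()  # firing order under test
        on_stop()
        started = self.assertSignalFired(on_start)
        stopped = self.assertSignalFired(on_stop)
        # Events returned by assertSignalFired() feed straight in.
        self.assertSignalOrdering(started, stopped)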
mongodb/mongo-python-driver | pymongo/collection.py | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/collection.py#L1877-L1974 | def create_index(self, keys, session=None, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for passing maxTimeMS
in kwargs.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
cmd_options = {}
if "maxTimeMS" in kwargs:
cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
self.__create_index(keys, kwargs, session, **cmd_options)
return name | [
"def",
"create_index",
"(",
"self",
",",
"keys",
",",
"session",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"keys",
"=",
"helpers",
".",
"_index_list",
"(",
"keys",
")",
"name",
"=",
"kwargs",
".",
"setdefault",
"(",
"\"name\"",
",",
"helpers",
".",
"_gen_index_name",
"(",
"keys",
")",
")",
"cmd_options",
"=",
"{",
"}",
"if",
"\"maxTimeMS\"",
"in",
"kwargs",
":",
"cmd_options",
"[",
"\"maxTimeMS\"",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"\"maxTimeMS\"",
")",
"self",
".",
"__create_index",
"(",
"keys",
",",
"kwargs",
",",
"session",
",",
"*",
"*",
"cmd_options",
")",
"return",
"name"
] | Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for passing maxTimeMS
in kwargs.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes | [
"Creates",
"an",
"index",
"on",
"this",
"collection",
"."
] | python | train |
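One more hedged sketch beyond the docstring's own examples, combining the name and expireAfterSeconds options; the connection URI and namespace are placeholders.

from pymongo import MongoClient, ASCENDING

client = MongoClient('mongodb://localhost:27017')
sessions = client.mydb.sessions
# Documents are deleted roughly 3600 s after their 'created_at' UTC datetime.
index_name = sessions.create_index(
    [('created_at', ASCENDING)],
    name='ttl_created_at',
    expireAfterSeconds=3600,
)
print(index_name)  # 'ttl_created_at'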