Unnamed: 0 | repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url
---|---|---|---|---|---|---|---|---|---|---|---
int64 (0-10k) | string (7-54) | string (5-223) | string (1-134) | string (100-30.3k) | 1 class ("python") | string (100-30.3k) | string (138-33.2k) | string (1-15k) | string (5-5.14k) | 1 class ("train") | string (91-315)
9,800 | cni/MRS | MRS/freesurfer.py | reconall | python | train | https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/freesurfer.py#L17-L99 |

def reconall(subjfile, subjID=None, subjdir=None, runreconall=True):
"""
Carries out Freesurfer's reconall on T1 nifti file
WARNING: Reconall takes very long to run!!
http://nipy.sourceforge.net/nipype/users/examples/smri_freesurfer.html
Parameters
----------
subjfile: nifti file
Path to subject's T1 nifti file
subjID: string
optional name for subject's output folder
subjdir: string
The directory to where segmentation results should be saved. Defaults
to same directory as subjfile.
runreconall: boolean
If set to true, runs reconall, otherwise just converts assorted mgz
files to nii
"""
T1dir = os.path.dirname(subjfile)
filename = os.path.basename(subjfile)
# subject ID
if subjID==None:
m=re.search('(\w+?)_*_', subjfile)
subjID=m.group(0) + 'seg'
# Tell freesurfer what subjects directory to use
if subjdir==None:
subjdir=T1dir
fs.FSCommand.set_default_subjects_dir(subjdir)
segdir=subjdir+'/'+subjID+'/'
print('saving to ' + subjdir)
# check if file exists
if os.path.isfile(subjfile):
print('running recon-all on ' + filename)
else:
raise ValueError("File: %s does not exist!"%filename)
# check if nifti format
ext=filename.split('.')[1].lower()
if ext != "nii":
raise ValueError("File: %s is not a nifti file!"%filename)
wf = pe.Workflow(name="segment")
wf.base_dir = T1dir
if runreconall:
# run recon-all
reconall = pe.Node(interface=fs.ReconAll(), name='reconall')
reconall.inputs.subject_id = subjID
reconall.inputs.directive = 'all'
reconall.inputs.subjects_dir = subjdir
reconall.inputs.T1_files = subjfile
wf.add_nodes([reconall])
result = wf.run()
# convert mgz to nii
wf2 = pe.Workflow(name="convertmgz")
wf2.base_dir = T1dir
convertmgz = pe.Node(interface=fs.MRIConvert(), name='convertmgz')
convertmgz.inputs.in_file = segdir+'mri/aseg.auto.mgz'
convertmgz.inputs.out_orientation='LPS'
convertmgz.inputs.resample_type= 'nearest'
convertmgz.inputs.reslice_like= subjfile
convertmgz.inputs.out_file=segdir+subjID+'_aseg.nii.gz'
wf2.add_nodes([convertmgz])
result2 = wf2.run()
if runreconall:
return (result, result2)
else:
return (result2) | python | def reconall(subjfile, subjID=None, subjdir=None, runreconall=True):
"""
Carries out Freesurfer's reconall on T1 nifti file
WARNING: Reconall takes very long to run!!
http://nipy.sourceforge.net/nipype/users/examples/smri_freesurfer.html
Parameters
----------
subjfile: nifti file
Path to subject's T1 nifti file
subjID: string
optional name for subject's output folder
subjdir: string
The directory to where segmentation results should be saved. Defaults
to same directory as subjfile.
runreconall: boolean
If set to true, runs reconall, otherwise just converts assorted mgz
files to nii
"""
T1dir = os.path.dirname(subjfile)
filename = os.path.basename(subjfile)
# subject ID
if subjID==None:
m=re.search('(\w+?)_*_', subjfile)
subjID=m.group(0) + 'seg'
# Tell freesurfer what subjects directory to use
if subjdir==None:
subjdir=T1dir
fs.FSCommand.set_default_subjects_dir(subjdir)
segdir=subjdir+'/'+subjID+'/'
print('saving to ' + subjdir)
# check if file exists
if os.path.isfile(subjfile):
print('running recon-all on ' + filename)
else:
raise ValueError("File: %s does not exist!"%filename)
# check if nifti format
ext=filename.split('.')[1].lower()
if ext != "nii":
raise ValueError("File: %s is not a nifti file!"%filename)
wf = pe.Workflow(name="segment")
wf.base_dir = T1dir
if runreconall:
# run recon-all
reconall = pe.Node(interface=fs.ReconAll(), name='reconall')
reconall.inputs.subject_id = subjID
reconall.inputs.directive = 'all'
reconall.inputs.subjects_dir = subjdir
reconall.inputs.T1_files = subjfile
wf.add_nodes([reconall])
result = wf.run()
# convert mgz to nii
wf2 = pe.Workflow(name="convertmgz")
wf2.base_dir = T1dir
convertmgz = pe.Node(interface=fs.MRIConvert(), name='convertmgz')
convertmgz.inputs.in_file = segdir+'mri/aseg.auto.mgz'
convertmgz.inputs.out_orientation='LPS'
convertmgz.inputs.resample_type= 'nearest'
convertmgz.inputs.reslice_like= subjfile
convertmgz.inputs.out_file=segdir+subjID+'_aseg.nii.gz'
wf2.add_nodes([convertmgz])
result2 = wf2.run()
if runreconall:
return (result, result2)
else:
return (result2) | ['def', 'reconall', '(', 'subjfile', ',', 'subjID', '=', 'None', ',', 'subjdir', '=', 'None', ',', 'runreconall', '=', 'True', ')', ':', 'T1dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'subjfile', ')', 'filename', '=', 'os', '.', 'path', '.', 'basename', '(', 'subjfile', ')', '# subject ID', 'if', 'subjID', '==', 'None', ':', 'm', '=', 're', '.', 'search', '(', "'(\\w+?)_*_'", ',', 'subjfile', ')', 'subjID', '=', 'm', '.', 'group', '(', '0', ')', '+', "'seg'", '# Tell freesurfer what subjects directory to use', 'if', 'subjdir', '==', 'None', ':', 'subjdir', '=', 'T1dir', 'fs', '.', 'FSCommand', '.', 'set_default_subjects_dir', '(', 'subjdir', ')', 'segdir', '=', 'subjdir', '+', "'/'", '+', 'subjID', '+', "'/'", 'print', '(', "'saving to '", '+', 'subjdir', ')', '# check if file exists', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'subjfile', ')', ':', 'print', '(', "'running recon-all on '", '+', 'filename', ')', 'else', ':', 'raise', 'ValueError', '(', '"File: %s does not exist!"', '%', 'filename', ')', '# check if nifti format', 'ext', '=', 'filename', '.', 'split', '(', "'.'", ')', '[', '1', ']', '.', 'lower', '(', ')', 'if', 'ext', '!=', '"nii"', ':', 'raise', 'ValueError', '(', '"File: %s is not a nifti file!"', '%', 'filename', ')', 'wf', '=', 'pe', '.', 'Workflow', '(', 'name', '=', '"segment"', ')', 'wf', '.', 'base_dir', '=', 'T1dir', 'if', 'runreconall', ':', '# run recon-all', 'reconall', '=', 'pe', '.', 'Node', '(', 'interface', '=', 'fs', '.', 'ReconAll', '(', ')', ',', 'name', '=', "'reconall'", ')', 'reconall', '.', 'inputs', '.', 'subject_id', '=', 'subjID', 'reconall', '.', 'inputs', '.', 'directive', '=', "'all'", 'reconall', '.', 'inputs', '.', 'subjects_dir', '=', 'subjdir', 'reconall', '.', 'inputs', '.', 'T1_files', '=', 'subjfile', 'wf', '.', 'add_nodes', '(', '[', 'reconall', ']', ')', 'result', '=', 'wf', '.', 'run', '(', ')', '# convert mgz to nii', 'wf2', '=', 'pe', '.', 'Workflow', '(', 'name', '=', '"convertmgz"', ')', 'wf2', '.', 'base_dir', '=', 'T1dir', 'convertmgz', '=', 'pe', '.', 'Node', '(', 'interface', '=', 'fs', '.', 'MRIConvert', '(', ')', ',', 'name', '=', "'convertmgz'", ')', 'convertmgz', '.', 'inputs', '.', 'in_file', '=', 'segdir', '+', "'mri/aseg.auto.mgz'", 'convertmgz', '.', 'inputs', '.', 'out_orientation', '=', "'LPS'", 'convertmgz', '.', 'inputs', '.', 'resample_type', '=', "'nearest'", 'convertmgz', '.', 'inputs', '.', 'reslice_like', '=', 'subjfile', 'convertmgz', '.', 'inputs', '.', 'out_file', '=', 'segdir', '+', 'subjID', '+', "'_aseg.nii.gz'", 'wf2', '.', 'add_nodes', '(', '[', 'convertmgz', ']', ')', 'result2', '=', 'wf2', '.', 'run', '(', ')', 'if', 'runreconall', ':', 'return', '(', 'result', ',', 'result2', ')', 'else', ':', 'return', '(', 'result2', ')'] | Carries out Freesurfer's reconall on T1 nifti file
WARNING: Reconall takes very long to run!!
http://nipy.sourceforge.net/nipype/users/examples/smri_freesurfer.html
Parameters
----------
subjfile: nifti file
Path to subject's T1 nifti file
subjID: string
optional name for subject's output folder
subjdir: string
The directory to where segmentation results should be saved. Defaults
to same directory as subjfile.
runreconall: boolean
If set to true, runs reconall, otherwise just converts assorted mgz
files to nii | ['Carries', 'out', 'Freesurfer', 's', 'reconall', 'on', 'T1', 'nifti', 'file', 'WARNING', ':', 'Reconall', 'takes', 'very', 'long', 'to', 'run!!'] | train | https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/freesurfer.py#L17-L99 |
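A hedged usage sketch (the path and subject ID below are made up; assumes `os`, `re`, and nipype imported as `pe`/`fs` as in the source module, with FreeSurfer installed):

    # Hypothetical call: skip the (very long) recon-all run and only convert
    # an existing aseg.auto.mgz to nii, resliced like the input T1.
    results = reconall('/data/study/subj01_t1_.nii',
                       subjID='subj01seg',
                       runreconall=False)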
9,801 | jilljenn/tryalgo | tryalgo/laser_mirrors.py | solve | python | train | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/laser_mirrors.py#L57-L80 |

def solve(succ, orien, i, direc):
"""Can a laser leaving mirror i in direction direc reach exit ?
:param i: mirror index
:param direc: direction leaving mirror i
:param orient: orient[i]=orientation of mirror i
:param succ: succ[i][direc]=succ mirror reached
when leaving i in direction direc
"""
assert orien[i] is not None
j = succ[i][direc]
if j is None: # basic case
return False
if j == len(orien) - 1:
return True
if orien[j] is None: # try both orientations
for x in [0, 1]:
orien[j] = x
if solve(succ, orien, j, reflex[direc][x]):
return True
orien[j] = None
return False
else:
return solve(succ, orien, j, reflex[direc][orien[j]]) | python | def solve(succ, orien, i, direc):
"""Can a laser leaving mirror i in direction direc reach exit ?
:param i: mirror index
:param direc: direction leaving mirror i
:param orient: orient[i]=orientation of mirror i
:param succ: succ[i][direc]=succ mirror reached
when leaving i in direction direc
"""
assert orien[i] is not None
j = succ[i][direc]
if j is None: # basic case
return False
if j == len(orien) - 1:
return True
if orien[j] is None: # try both orientations
for x in [0, 1]:
orien[j] = x
if solve(succ, orien, j, reflex[direc][x]):
return True
orien[j] = None
return False
else:
return solve(succ, orien, j, reflex[direc][orien[j]]) | ['def', 'solve', '(', 'succ', ',', 'orien', ',', 'i', ',', 'direc', ')', ':', 'assert', 'orien', '[', 'i', ']', 'is', 'not', 'None', 'j', '=', 'succ', '[', 'i', ']', '[', 'direc', ']', 'if', 'j', 'is', 'None', ':', '# basic case', 'return', 'False', 'if', 'j', '==', 'len', '(', 'orien', ')', '-', '1', ':', 'return', 'True', 'if', 'orien', '[', 'j', ']', 'is', 'None', ':', '# try both orientations', 'for', 'x', 'in', '[', '0', ',', '1', ']', ':', 'orien', '[', 'j', ']', '=', 'x', 'if', 'solve', '(', 'succ', ',', 'orien', ',', 'j', ',', 'reflex', '[', 'direc', ']', '[', 'x', ']', ')', ':', 'return', 'True', 'orien', '[', 'j', ']', '=', 'None', 'return', 'False', 'else', ':', 'return', 'solve', '(', 'succ', ',', 'orien', ',', 'j', ',', 'reflex', '[', 'direc', ']', '[', 'orien', '[', 'j', ']', ']', ')'] | Can a laser leaving mirror i in direction direc reach exit ?
:param i: mirror index
:param direc: direction leaving mirror i
:param orient: orient[i]=orientation of mirror i
:param succ: succ[i][direc]=succ mirror reached
when leaving i in direction direc | ['Can', 'a', 'laser', 'leaving', 'mirror', 'i', 'in', 'direction', 'direc', 'reach', 'exit', '?'] | train | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/laser_mirrors.py#L57-L80 |
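The function depends on a module-level `reflex` table mapping (incoming travel direction, mirror orientation) to the outgoing direction. A sketch of one consistent encoding (illustrative only; tryalgo defines its own constants):

    UP, LEFT, DOWN, RIGHT = 0, 1, 2, 3
    # reflex[direction][orientation]: orientation 0 is '/', orientation 1 is '\'
    reflex = [[RIGHT, LEFT],   # travelling UP
              [DOWN, UP],      # travelling LEFT
              [LEFT, RIGHT],   # travelling DOWN
              [UP, DOWN]]      # travelling RIGHT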
9,802 | hubo1016/vlcp | vlcp/event/pqueue.py | CBQueue.notifyBlock | python | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/pqueue.py#L903-L923 |

def notifyBlock(self, queue, blocked):
    '''
    Internal notify for sub-queues being blocked
    '''
    if blocked:
        if self.prioritySet[-1] == queue.priority:
            self.prioritySet.pop()
        else:
            pindex = bisect_left(self.prioritySet, queue.priority)
            if pindex < len(self.prioritySet) and self.prioritySet[pindex] == queue.priority:
                del self.prioritySet[pindex]
    else:
        if queue.canPop():
            pindex = bisect_left(self.prioritySet, queue.priority)
            if pindex >= len(self.prioritySet) or self.prioritySet[pindex] != queue.priority:
                self.prioritySet.insert(pindex, queue.priority)

    newblocked = not self.canPop()
    if newblocked != self.blocked:
        self.blocked = newblocked
        if self.parent is not None:
            self.parent.notifyBlock(self, newblocked)
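The bookkeeping above keeps `prioritySet` sorted so membership can be maintained with binary search, and the blocked state propagates recursively up to the parent queue. The same bisect pattern in isolation (names are illustrative, not vlcp's API):

    from bisect import bisect_left

    prioritySet = [10, 20, 40]              # sorted priorities of unblocked sub-queues
    pindex = bisect_left(prioritySet, 20)
    if pindex < len(prioritySet) and prioritySet[pindex] == 20:
        del prioritySet[pindex]             # priority 20 just became blocked
    print(prioritySet)                      # [10, 40]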
9,803 | incuna/incuna-auth | incuna_auth/middleware/permission_feincms.py | FeinCMSPermissionMiddleware.is_resource_protected | python | train | https://github.com/incuna/incuna-auth/blob/949ccd922da15a4b5de17b9595cc8f5114d5385c/incuna_auth/middleware/permission_feincms.py#L72-L81 |

def is_resource_protected(self, request, **kwargs):
"""
Determines if a resource should be protected.
Returns true if and only if the resource's access_state matches an entry in
the return value of get_protected_states().
"""
access_state = self._get_resource_access_state(request)
protected_states = self.get_protected_states()
return access_state in protected_states | python | def is_resource_protected(self, request, **kwargs):
"""
Determines if a resource should be protected.
Returns true if and only if the resource's access_state matches an entry in
the return value of get_protected_states().
"""
access_state = self._get_resource_access_state(request)
protected_states = self.get_protected_states()
return access_state in protected_states | ['def', 'is_resource_protected', '(', 'self', ',', 'request', ',', '*', '*', 'kwargs', ')', ':', 'access_state', '=', 'self', '.', '_get_resource_access_state', '(', 'request', ')', 'protected_states', '=', 'self', '.', 'get_protected_states', '(', ')', 'return', 'access_state', 'in', 'protected_states'] | Determines if a resource should be protected.
Returns true if and only if the resource's access_state matches an entry in
the return value of get_protected_states(). | ['Determines', 'if', 'a', 'resource', 'should', 'be', 'protected', '.'] | train | https://github.com/incuna/incuna-auth/blob/949ccd922da15a4b5de17b9595cc8f5114d5385c/incuna_auth/middleware/permission_feincms.py#L72-L81 |
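A minimal sketch of the contract this method expects from subclasses (the class name and states below are hypothetical):

    class DraftProtectionMiddleware(FeinCMSPermissionMiddleware):
        def get_protected_states(self):
            # any resource whose access_state is listed here requires auth
            return ['draft', 'pending_review']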
9,804 | ultrabug/py3status | py3status/request.py | HttpResponse.status_code | python | train | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/request.py#L93-L101 |

def status_code(self):
"""
Get the http status code for the response
"""
try:
return self._status_code
except AttributeError:
self._status_code = self._response.getcode()
return self._status_code | python | def status_code(self):
"""
Get the http status code for the response
"""
try:
return self._status_code
except AttributeError:
self._status_code = self._response.getcode()
return self._status_code | ['def', 'status_code', '(', 'self', ')', ':', 'try', ':', 'return', 'self', '.', '_status_code', 'except', 'AttributeError', ':', 'self', '.', '_status_code', '=', 'self', '.', '_response', '.', 'getcode', '(', ')', 'return', 'self', '.', '_status_code'] | Get the http status code for the response | ['Get', 'the', 'http', 'status', 'code', 'for', 'the', 'response'] | train | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/request.py#L93-L101 |
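The try/except AttributeError idiom here is a lazy-caching pattern: the first access computes and stores `_status_code`, and later accesses return the cached value. The pattern in isolation (class and value are illustrative):

    class Lazy:
        @property
        def value(self):
            try:
                return self._value      # cached after the first access
            except AttributeError:
                self._value = 42        # stands in for a real computation
                return self._value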
9,805 | log2timeline/plaso | plaso/cli/psort_tool.py | PsortTool._ParseInformationalOptions | python | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/psort_tool.py#L228-L242 |

def _ParseInformationalOptions(self, options):
"""Parses the informational options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
super(PsortTool, self)._ParseInformationalOptions(options)
self._quiet_mode = getattr(options, 'quiet', False)
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['status_view']) | python | def _ParseInformationalOptions(self, options):
"""Parses the informational options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
super(PsortTool, self)._ParseInformationalOptions(options)
self._quiet_mode = getattr(options, 'quiet', False)
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['status_view']) | ['def', '_ParseInformationalOptions', '(', 'self', ',', 'options', ')', ':', 'super', '(', 'PsortTool', ',', 'self', ')', '.', '_ParseInformationalOptions', '(', 'options', ')', 'self', '.', '_quiet_mode', '=', 'getattr', '(', 'options', ',', "'quiet'", ',', 'False', ')', 'helpers_manager', '.', 'ArgumentHelperManager', '.', 'ParseOptions', '(', 'options', ',', 'self', ',', 'names', '=', '[', "'status_view'", ']', ')'] | Parses the informational options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid. | ['Parses', 'the', 'informational', 'options', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/psort_tool.py#L228-L242 |
9,806 | modin-project/modin | modin/backends/pandas/query_compiler.py | PandasQueryCompiler.numeric_columns | python | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L167-L179 |

def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns | python | def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns | ['def', 'numeric_columns', '(', 'self', ',', 'include_bool', '=', 'True', ')', ':', 'columns', '=', '[', ']', 'for', 'col', ',', 'dtype', 'in', 'zip', '(', 'self', '.', 'columns', ',', 'self', '.', 'dtypes', ')', ':', 'if', 'is_numeric_dtype', '(', 'dtype', ')', 'and', '(', 'include_bool', 'or', '(', 'not', 'include_bool', 'and', 'dtype', '!=', 'np', '.', 'bool_', ')', ')', ':', 'columns', '.', 'append', '(', 'col', ')', 'return', 'columns'] | Returns the numeric columns of the Manager.
Returns:
List of index names. | ['Returns', 'the', 'numeric', 'columns', 'of', 'the', 'Manager', '.'] | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L167-L179 |
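The same selection logic in plain pandas (the DataFrame is made up; with include_bool=False the boolean column is dropped):

    import numpy as np
    import pandas as pd
    from pandas.api.types import is_numeric_dtype

    df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5], 'c': ['x', 'y'], 'd': [True, False]})
    numeric = [col for col, dtype in df.dtypes.items()
               if is_numeric_dtype(dtype) and dtype != np.bool_]
    print(numeric)   # ['a', 'b']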
9,807 | dhondta/tinyscript | tinyscript/helpers/types.py | neg_int | python | train | https://github.com/dhondta/tinyscript/blob/624a0718db698899e7bc3ba6ac694baed251e81d/tinyscript/helpers/types.py#L21-L30 |

def neg_int(i):
""" Simple negative integer validation. """
try:
if isinstance(i, string_types):
i = int(i)
if not isinstance(i, int) or i > 0:
raise Exception()
except:
raise ValueError("Not a negative integer")
return i | python | def neg_int(i):
""" Simple negative integer validation. """
try:
if isinstance(i, string_types):
i = int(i)
if not isinstance(i, int) or i > 0:
raise Exception()
except:
raise ValueError("Not a negative integer")
return i | ['def', 'neg_int', '(', 'i', ')', ':', 'try', ':', 'if', 'isinstance', '(', 'i', ',', 'string_types', ')', ':', 'i', '=', 'int', '(', 'i', ')', 'if', 'not', 'isinstance', '(', 'i', ',', 'int', ')', 'or', 'i', '>', '0', ':', 'raise', 'Exception', '(', ')', 'except', ':', 'raise', 'ValueError', '(', '"Not a negative integer"', ')', 'return', 'i'] | Simple negative integer validation. | ['Simple', 'negative', 'integer', 'validation', '.'] | train | https://github.com/dhondta/tinyscript/blob/624a0718db698899e7bc3ba6ac694baed251e81d/tinyscript/helpers/types.py#L21-L30 |
9,808 | buildbot/buildbot | master/buildbot/plugins/db.py | _PluginDB.add_namespace | def add_namespace(self, namespace, interface=None, check_extras=True,
load_now=False):
"""
register given namespace in global database of plugins
in case it's already registered, return the registration
"""
tempo = self._namespaces.get(namespace)
if tempo is None:
tempo = _Plugins(namespace, interface, check_extras)
self._namespaces[namespace] = tempo
if load_now:
tempo.load()
return tempo | python | def add_namespace(self, namespace, interface=None, check_extras=True,
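A hedged usage sketch (the namespace name is made up, and how Buildbot constructs its `_PluginDB` singleton is not shown in this row):

    db = _PluginDB()
    workers = db.add_namespace('buildbot.worker')
    same = db.add_namespace('buildbot.worker')   # already registered
    assert workers is same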
9,809 | trevisanj/f311 | f311/util.py | load_spectrum_fits_messed_x | python | train | https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/util.py#L122-L152 |

def load_spectrum_fits_messed_x(filename, sp_ref=None):
"""Loads FITS file spectrum that does not have the proper headers. Returns a Spectrum"""
import f311.filetypes as ft
# First tries to load as usual
f = load_with_classes(filename, (ft.FileSpectrumFits,))
if f is not None:
ret = f.spectrum
else:
hdul = fits.open(filename)
hdu = hdul[0]
if not hdu.header.get("CDELT1"):
hdu.header["CDELT1"] = 1 if sp_ref is None else sp_ref.delta_lambda
if not hdu.header.get("CRVAL1"):
hdu.header["CRVAL1"] = 0 if sp_ref is None else sp_ref.x[0]
ret = ft.Spectrum()
ret.from_hdu(hdu)
ret.filename = filename
original_shape = ret.y.shape # Shape of data before squeeze
# Squeezes to make data of shape e.g. (1, 1, 122) into (122,)
ret.y = ret.y.squeeze()
if len(ret.y.shape) > 1:
raise RuntimeError(
"Data contains more than 1 dimension (shape is {0!s}), "
"FITS file is not single spectrum".format(original_shape))
return ret | python | def load_spectrum_fits_messed_x(filename, sp_ref=None):
"""Loads FITS file spectrum that does not have the proper headers. Returns a Spectrum"""
import f311.filetypes as ft
# First tries to load as usual
f = load_with_classes(filename, (ft.FileSpectrumFits,))
if f is not None:
ret = f.spectrum
else:
hdul = fits.open(filename)
hdu = hdul[0]
if not hdu.header.get("CDELT1"):
hdu.header["CDELT1"] = 1 if sp_ref is None else sp_ref.delta_lambda
if not hdu.header.get("CRVAL1"):
hdu.header["CRVAL1"] = 0 if sp_ref is None else sp_ref.x[0]
ret = ft.Spectrum()
ret.from_hdu(hdu)
ret.filename = filename
original_shape = ret.y.shape # Shape of data before squeeze
# Squeezes to make data of shape e.g. (1, 1, 122) into (122,)
ret.y = ret.y.squeeze()
if len(ret.y.shape) > 1:
raise RuntimeError(
"Data contains more than 1 dimension (shape is {0!s}), "
"FITS file is not single spectrum".format(original_shape))
return ret | ['def', 'load_spectrum_fits_messed_x', '(', 'filename', ',', 'sp_ref', '=', 'None', ')', ':', 'import', 'f311', '.', 'filetypes', 'as', 'ft', '# First tries to load as usual', 'f', '=', 'load_with_classes', '(', 'filename', ',', '(', 'ft', '.', 'FileSpectrumFits', ',', ')', ')', 'if', 'f', 'is', 'not', 'None', ':', 'ret', '=', 'f', '.', 'spectrum', 'else', ':', 'hdul', '=', 'fits', '.', 'open', '(', 'filename', ')', 'hdu', '=', 'hdul', '[', '0', ']', 'if', 'not', 'hdu', '.', 'header', '.', 'get', '(', '"CDELT1"', ')', ':', 'hdu', '.', 'header', '[', '"CDELT1"', ']', '=', '1', 'if', 'sp_ref', 'is', 'None', 'else', 'sp_ref', '.', 'delta_lambda', 'if', 'not', 'hdu', '.', 'header', '.', 'get', '(', '"CRVAL1"', ')', ':', 'hdu', '.', 'header', '[', '"CRVAL1"', ']', '=', '0', 'if', 'sp_ref', 'is', 'None', 'else', 'sp_ref', '.', 'x', '[', '0', ']', 'ret', '=', 'ft', '.', 'Spectrum', '(', ')', 'ret', '.', 'from_hdu', '(', 'hdu', ')', 'ret', '.', 'filename', '=', 'filename', 'original_shape', '=', 'ret', '.', 'y', '.', 'shape', '# Shape of data before squeeze', '# Squeezes to make data of shape e.g. (1, 1, 122) into (122,)', 'ret', '.', 'y', '=', 'ret', '.', 'y', '.', 'squeeze', '(', ')', 'if', 'len', '(', 'ret', '.', 'y', '.', 'shape', ')', '>', '1', ':', 'raise', 'RuntimeError', '(', '"Data contains more than 1 dimension (shape is {0!s}), "', '"FITS file is not single spectrum"', '.', 'format', '(', 'original_shape', ')', ')', 'return', 'ret'] | Loads FITS file spectrum that does not have the proper headers. Returns a Spectrum | ['Loads', 'FITS', 'file', 'spectrum', 'that', 'does', 'not', 'have', 'the', 'proper', 'headers', '.', 'Returns', 'a', 'Spectrum'] | train | https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/util.py#L122-L152 |
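A usage sketch (file names are hypothetical; assumes f311 and astropy are available). A well-formed reference spectrum supplies the missing CDELT1/CRVAL1 header values for a malformed one:

    sp_ref = load_spectrum_fits_messed_x('good_headers.fits')
    sp = load_spectrum_fits_messed_x('messed_headers.fits', sp_ref=sp_ref)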
9,810 | bwohlberg/sporco | sporco/cnvrep.py | Pcn | python | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cnvrep.py#L842-L886 |

def Pcn(x, dsz, Nv, dimN=2, dimC=1, crp=False, zm=False):
"""Constraint set projection for convolutional dictionary update
problem.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
crp : bool, optional (default False)
Flag indicating whether the result should be cropped to the support
of the largest filter in the dictionary.
zm : bool, optional (default False)
Flag indicating whether the projection function should include
filter mean subtraction
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
if crp:
def zpadfn(x):
return x
else:
def zpadfn(x):
return zpad(x, Nv)
if zm:
def zmeanfn(x):
return zeromean(x, dsz, dimN)
else:
def zmeanfn(x):
return x
return normalise(zmeanfn(zpadfn(bcrop(x, dsz, dimN))), dimN + dimC) | python | def Pcn(x, dsz, Nv, dimN=2, dimC=1, crp=False, zm=False):
"""Constraint set projection for convolutional dictionary update
problem.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
crp : bool, optional (default False)
Flag indicating whether the result should be cropped to the support
of the largest filter in the dictionary.
zm : bool, optional (default False)
Flag indicating whether the projection function should include
filter mean subtraction
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
if crp:
def zpadfn(x):
return x
else:
def zpadfn(x):
return zpad(x, Nv)
if zm:
def zmeanfn(x):
return zeromean(x, dsz, dimN)
else:
def zmeanfn(x):
return x
return normalise(zmeanfn(zpadfn(bcrop(x, dsz, dimN))), dimN + dimC) | ['def', 'Pcn', '(', 'x', ',', 'dsz', ',', 'Nv', ',', 'dimN', '=', '2', ',', 'dimC', '=', '1', ',', 'crp', '=', 'False', ',', 'zm', '=', 'False', ')', ':', 'if', 'crp', ':', 'def', 'zpadfn', '(', 'x', ')', ':', 'return', 'x', 'else', ':', 'def', 'zpadfn', '(', 'x', ')', ':', 'return', 'zpad', '(', 'x', ',', 'Nv', ')', 'if', 'zm', ':', 'def', 'zmeanfn', '(', 'x', ')', ':', 'return', 'zeromean', '(', 'x', ',', 'dsz', ',', 'dimN', ')', 'else', ':', 'def', 'zmeanfn', '(', 'x', ')', ':', 'return', 'x', 'return', 'normalise', '(', 'zmeanfn', '(', 'zpadfn', '(', 'bcrop', '(', 'x', ',', 'dsz', ',', 'dimN', ')', ')', ')', ',', 'dimN', '+', 'dimC', ')'] | Constraint set projection for convolutional dictionary update
problem.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
crp : bool, optional (default False)
Flag indicating whether the result should be cropped to the support
of the largest filter in the dictionary.
zm : bool, optional (default False)
Flag indicating whether the projection function should include
filter mean subtraction
Returns
-------
y : ndarray
Projection of input onto constraint set | ['Constraint', 'set', 'projection', 'for', 'convolutional', 'dictionary', 'update', 'problem', '.'] | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cnvrep.py#L842-L886 |
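A minimal sketch under assumed shapes (sporco lays dictionaries out as spatial x channel x filter; the sizes below are made up). The projection crops to the filter support, optionally subtracts the mean, zero-pads to the problem grid, and normalises each filter:

    import numpy as np

    x = np.random.randn(8, 8, 1, 3)            # three single-channel 8x8 filters
    y = Pcn(x, dsz=(8, 8, 3), Nv=(32, 32))     # zero-pad to 32x32, then normalise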
9,811 | briancappello/flask-unchained | flask_unchained/bundles/security/services/security_service.py | SecurityService.register_user | python | train | https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/services/security_service.py#L122-L163 |

def register_user(self, user, allow_login=None, send_email=None,
                  _force_login_without_confirmation=False):
    """
    Service method to register a user.

    Sends signal `user_registered`.

    Returns True if the user has been logged in, False otherwise.
    """
    should_login_user = (not self.security.confirmable
                         or self.security.login_without_confirmation
                         or _force_login_without_confirmation)
    should_login_user = (should_login_user if allow_login is None
                         else allow_login and should_login_user)
    if should_login_user:
        user.active = True

    # confirmation token depends on having user.id set, which requires
    # the user be committed to the database
    self.user_manager.save(user, commit=True)

    confirmation_link, token = None, None
    if self.security.confirmable and not _force_login_without_confirmation:
        token = self.security_utils_service.generate_confirmation_token(user)
        confirmation_link = url_for('security_controller.confirm_email',
                                    token=token, _external=True)

    user_registered.send(app._get_current_object(),
                         user=user, confirm_token=token)

    if (send_email or (
            send_email is None
            and app.config.SECURITY_SEND_REGISTER_EMAIL)):
        self.send_mail(_('flask_unchained.bundles.security:email_subject.register'),
                       to=user.email,
                       template='security/email/welcome.html',
                       user=user,
                       confirmation_link=confirmation_link)

    if should_login_user:
        return self.login_user(user, force=_force_login_without_confirmation)
    return False
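A hedged call-site sketch (how the user object and the injected services are obtained is simplified; names follow the method's own parameters, not a confirmed API):

    user = user_manager.create(email='new@example.com', password='hunter2!')  # hypothetical
    logged_in = security_service.register_user(user, send_email=False)
    if not logged_in:
        pass  # email confirmation required before login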
9,812 | DLR-RM/RAFCON | source/rafcon/core/execution/execution_engine.py | ExecutionEngine.pause | python | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/execution/execution_engine.py#L70-L80 |

def pause(self):
"""Set the execution mode to paused
"""
if self.state_machine_manager.active_state_machine_id is None:
logger.info("'Pause' is not a valid action to initiate state machine execution.")
return
if self.state_machine_manager.get_active_state_machine() is not None:
self.state_machine_manager.get_active_state_machine().root_state.recursively_pause_states()
logger.debug("Pause execution ...")
self.set_execution_mode(StateMachineExecutionStatus.PAUSED) | python | def pause(self):
"""Set the execution mode to paused
"""
if self.state_machine_manager.active_state_machine_id is None:
logger.info("'Pause' is not a valid action to initiate state machine execution.")
return
if self.state_machine_manager.get_active_state_machine() is not None:
self.state_machine_manager.get_active_state_machine().root_state.recursively_pause_states()
logger.debug("Pause execution ...")
self.set_execution_mode(StateMachineExecutionStatus.PAUSED) | ['def', 'pause', '(', 'self', ')', ':', 'if', 'self', '.', 'state_machine_manager', '.', 'active_state_machine_id', 'is', 'None', ':', 'logger', '.', 'info', '(', '"\'Pause\' is not a valid action to initiate state machine execution."', ')', 'return', 'if', 'self', '.', 'state_machine_manager', '.', 'get_active_state_machine', '(', ')', 'is', 'not', 'None', ':', 'self', '.', 'state_machine_manager', '.', 'get_active_state_machine', '(', ')', '.', 'root_state', '.', 'recursively_pause_states', '(', ')', 'logger', '.', 'debug', '(', '"Pause execution ..."', ')', 'self', '.', 'set_execution_mode', '(', 'StateMachineExecutionStatus', '.', 'PAUSED', ')'] | Set the execution mode to paused | ['Set', 'the', 'execution', 'mode', 'to', 'paused'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/execution/execution_engine.py#L70-L80 |
9,813 | gwastro/pycbc | pycbc/results/legacy_grb.py | write_recovery | python | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/legacy_grb.py#L483-L512 |

def write_recovery(page, injList):
"""
Write injection recovery plots to markup.page object page
"""
th = ['']+injList
td = []
plots = ['sky_error_time','sky_error_mchirp','sky_error_distance']
text = { 'sky_error_time':'Sky error vs time',\
'sky_error_mchirp':'Sky error vs mchirp',\
'sky_error_distance':'Sky error vs distance' }
for row in plots:
pTag = text[row]
d = [pTag]
for inj in injList:
plot = markup.page()
plot = markup.page()
p = "%s/efficiency_OFFTRIAL_1/found_%s.png" % (inj, row)
plot.a(href=p, title=pTag)
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
page = write_table(page, th, td)
return page | python | def write_recovery(page, injList):
"""
Write injection recovery plots to markup.page object page
"""
th = ['']+injList
td = []
plots = ['sky_error_time','sky_error_mchirp','sky_error_distance']
text = { 'sky_error_time':'Sky error vs time',\
'sky_error_mchirp':'Sky error vs mchirp',\
'sky_error_distance':'Sky error vs distance' }
for row in plots:
pTag = text[row]
d = [pTag]
for inj in injList:
plot = markup.page()
plot = markup.page()
p = "%s/efficiency_OFFTRIAL_1/found_%s.png" % (inj, row)
plot.a(href=p, title=pTag)
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
page = write_table(page, th, td)
return page | ['def', 'write_recovery', '(', 'page', ',', 'injList', ')', ':', 'th', '=', '[', "''", ']', '+', 'injList', 'td', '=', '[', ']', 'plots', '=', '[', "'sky_error_time'", ',', "'sky_error_mchirp'", ',', "'sky_error_distance'", ']', 'text', '=', '{', "'sky_error_time'", ':', "'Sky error vs time'", ',', "'sky_error_mchirp'", ':', "'Sky error vs mchirp'", ',', "'sky_error_distance'", ':', "'Sky error vs distance'", '}', 'for', 'row', 'in', 'plots', ':', 'pTag', '=', 'text', '[', 'row', ']', 'd', '=', '[', 'pTag', ']', 'for', 'inj', 'in', 'injList', ':', 'plot', '=', 'markup', '.', 'page', '(', ')', 'plot', '=', 'markup', '.', 'page', '(', ')', 'p', '=', '"%s/efficiency_OFFTRIAL_1/found_%s.png"', '%', '(', 'inj', ',', 'row', ')', 'plot', '.', 'a', '(', 'href', '=', 'p', ',', 'title', '=', 'pTag', ')', 'plot', '.', 'img', '(', 'src', '=', 'p', ')', 'plot', '.', 'a', '.', 'close', '(', ')', 'd', '.', 'append', '(', 'plot', '(', ')', ')', 'td', '.', 'append', '(', 'd', ')', 'page', '=', 'write_table', '(', 'page', ',', 'th', ',', 'td', ')', 'return', 'page'] | Write injection recovery plots to markup.page object page | ['Write', 'injection', 'recovery', 'plots', 'to', 'markup', '.', 'page', 'object', 'page'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/legacy_grb.py#L483-L512 |
9,814 | williamgilpin/pypdb | pypdb/pypdb.py | find_results_gen | python | train | https://github.com/williamgilpin/pypdb/blob/bfb9e1b15b4ad097c5add50c4c176ac6cb28ee15/pypdb/pypdb.py#L904-L938 |

def find_results_gen(search_term, field='title'):
    '''
    Return a generator of the results returned by a search of
    the protein data bank. This generator is used internally.

    Parameters
    ----------
    search_term : str
        The search keyword

    field : str
        The type of information to record about each entry

    Examples
    --------
    >>> result_gen = find_results_gen('bleb')
    >>> pprint.pprint([item for item in result_gen][:5])
    ['MYOSIN II DICTYOSTELIUM DISCOIDEUM MOTOR DOMAIN S456Y BOUND WITH MGADP-BEFX',
     'MYOSIN II DICTYOSTELIUM DISCOIDEUM MOTOR DOMAIN S456Y BOUND WITH MGADP-ALF4',
     'DICTYOSTELIUM DISCOIDEUM MYOSIN II MOTOR DOMAIN S456E WITH BOUND MGADP-BEFX',
     'MYOSIN II DICTYOSTELIUM DISCOIDEUM MOTOR DOMAIN S456E BOUND WITH MGADP-ALF4',
     'The structural basis of blebbistatin inhibition and specificity for myosin '
     'II']
    '''
    scan_params = make_query(search_term, querytype='AdvancedKeywordQuery')
    search_result_ids = do_search(scan_params)

    for pdb_result in search_result_ids:
        result = describe_pdb(pdb_result)
        if field in result.keys():
            yield result[field]
9,815 | saltstack/salt | salt/modules/dockermod.py | _get_create_kwargs | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockermod.py#L852-L907 |

def _get_create_kwargs(skip_translate=None,
                       ignore_collisions=False,
                       validate_ip_addrs=True,
                       client_args=None,
                       **kwargs):
    '''
    Take input kwargs and return a kwargs dict to pass to docker-py's
    create_container() function.
    '''
    networks = kwargs.pop('networks', {})
    if kwargs.get('network_mode', '') in networks:
        networks = {kwargs['network_mode']: networks[kwargs['network_mode']]}
    else:
        networks = {}

    kwargs = __utils__['docker.translate_input'](
        salt.utils.docker.translate.container,
        skip_translate=skip_translate,
        ignore_collisions=ignore_collisions,
        validate_ip_addrs=validate_ip_addrs,
        **__utils__['args.clean_kwargs'](**kwargs))

    if networks:
        kwargs['networking_config'] = _create_networking_config(networks)

    if client_args is None:
        try:
            client_args = get_client_args(['create_container', 'host_config'])
        except CommandExecutionError as exc:
            log.error('docker.create: Error getting client args: \'%s\'',
                      exc.__str__(), exc_info=True)
            raise CommandExecutionError(
                'Failed to get client args: {0}'.format(exc))

    full_host_config = {}
    host_kwargs = {}
    create_kwargs = {}
    # Using list() because we'll be altering kwargs during iteration
    for arg in list(kwargs):
        if arg in client_args['host_config']:
            host_kwargs[arg] = kwargs.pop(arg)
            continue
        if arg in client_args['create_container']:
            if arg == 'host_config':
                full_host_config.update(kwargs.pop(arg))
            else:
                create_kwargs[arg] = kwargs.pop(arg)
            continue

    create_kwargs['host_config'] = \
        _client_wrapper('create_host_config', **host_kwargs)
    # In the event that a full host_config was passed, overlay it on top of
    # the one we just created.
    create_kwargs['host_config'].update(full_host_config)

    # The "kwargs" dict at this point will only contain unused args
    return create_kwargs, kwargs
9,816 | quadrismegistus/prosodic | prosodic/entity.py | entity.newchild | python | train | https://github.com/quadrismegistus/prosodic/blob/8af66ed9be40c922d03a0b09bc11c87d2061b618/prosodic/entity.py#L243-L253 |

def newchild(self, chld=False):
"""Like givebirth(), but also appends the new child to the list of children."""
if not chld:
chld = self.givebirth()
lchld=[chld] if type(chld)!=list else chld
for chldx in lchld: chldx.parent=self
self.children.append(chld)
return chld | python | def newchild(self,chld=False):
"""Like givebirth(), but also appends the new child to the list of children."""
if not chld:
chld = self.givebirth()
lchld=[chld] if type(chld)!=list else chld
for chldx in lchld: chldx.parent=self
self.children.append(chld)
return chld | ['def', 'newchild', '(', 'self', ',', 'chld', '=', 'False', ')', ':', 'if', 'not', 'chld', ':', 'chld', '=', 'self', '.', 'givebirth', '(', ')', 'lchld', '=', '[', 'chld', ']', 'if', 'type', '(', 'chld', ')', '!=', 'list', 'else', 'chld', 'for', 'chldx', 'in', 'lchld', ':', 'chldx', '.', 'parent', '=', 'self', 'self', '.', 'children', '.', 'append', '(', 'chld', ')', 'return', 'chld'] | Like givebirth(), but also appends the new child to the list of children. | ['Like', 'givebirth', '()', 'but', 'also', 'appends', 'the', 'new', 'child', 'to', 'the', 'list', 'of', 'children', '.'] | train | https://github.com/quadrismegistus/prosodic/blob/8af66ed9be40c922d03a0b09bc11c87d2061b618/prosodic/entity.py#L243-L253 |
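A usage sketch (how a prosodic `entity` is constructed is assumed, not shown in this row):

    root = entity()             # hypothetical construction
    child = root.newchild()     # born via givebirth() and appended
    assert child.parent is root and child in root.children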
9,817 | mbedmicro/pyOCD | pyocd/utility/conversion.py | u64_to_hex16le | python | train | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/utility/conversion.py#L92-L103 |

def u64_to_hex16le(val):
"""! @brief Create 16-digit hexadecimal string from 64-bit register value"""
return ''.join("%02x" % (x & 0xFF) for x in (
val,
val >> 8,
val >> 16,
val >> 24,
val >> 32,
val >> 40,
val >> 48,
val >> 56,
)) | python | def u64_to_hex16le(val):
"""! @brief Create 16-digit hexadecimal string from 64-bit register value"""
return ''.join("%02x" % (x & 0xFF) for x in (
val,
val >> 8,
val >> 16,
val >> 24,
val >> 32,
val >> 40,
val >> 48,
val >> 56,
)) | ['def', 'u64_to_hex16le', '(', 'val', ')', ':', 'return', "''", '.', 'join', '(', '"%02x"', '%', '(', 'x', '&', '0xFF', ')', 'for', 'x', 'in', '(', 'val', ',', 'val', '>>', '8', ',', 'val', '>>', '16', ',', 'val', '>>', '24', ',', 'val', '>>', '32', ',', 'val', '>>', '40', ',', 'val', '>>', '48', ',', 'val', '>>', '56', ',', ')', ')'] | ! @brief Create 16-digit hexadecimal string from 64-bit register value | ['!'] | train | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/utility/conversion.py#L92-L103 |
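The output is the value's bytes in little-endian order, two hex digits per byte:

    >>> u64_to_hex16le(0x0123456789ABCDEF)
    'efcdab8967452301'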
9,818 | Nachtfeuer/pipeline | spline/components/tasks.py | Tasks.prepare_shell_data | python | train | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L98-L127 |

def prepare_shell_data(self, shells, key, entry):
"""Prepare one shell or docker task."""
if self.can_process_shell(entry):
if key in ['python']:
entry['type'] = key
if 'with' in entry and isinstance(entry['with'], str):
rendered_with = ast.literal_eval(render(entry['with'],
variables=self.pipeline.variables,
model=self.pipeline.model,
env=self.get_merged_env(include_os=True)))
elif 'with' in entry:
rendered_with = entry['with']
else:
rendered_with = ['']
for item in rendered_with:
shells.append({
'id': self.next_task_id,
'creator': key,
'entry': entry,
'model': self.pipeline.model,
'env': self.get_merged_env(),
'item': item,
'dry_run': self.pipeline.options.dry_run,
'debug': self.pipeline.options.debug,
'strict': self.pipeline.options.strict,
'variables': self.pipeline.variables,
'temporary_scripts_path': self.pipeline.options.temporary_scripts_path})
self.next_task_id += 1 | python | def prepare_shell_data(self, shells, key, entry):
"""Prepare one shell or docker task."""
if self.can_process_shell(entry):
if key in ['python']:
entry['type'] = key
if 'with' in entry and isinstance(entry['with'], str):
rendered_with = ast.literal_eval(render(entry['with'],
variables=self.pipeline.variables,
model=self.pipeline.model,
env=self.get_merged_env(include_os=True)))
elif 'with' in entry:
rendered_with = entry['with']
else:
rendered_with = ['']
for item in rendered_with:
shells.append({
'id': self.next_task_id,
'creator': key,
'entry': entry,
'model': self.pipeline.model,
'env': self.get_merged_env(),
'item': item,
'dry_run': self.pipeline.options.dry_run,
'debug': self.pipeline.options.debug,
'strict': self.pipeline.options.strict,
'variables': self.pipeline.variables,
'temporary_scripts_path': self.pipeline.options.temporary_scripts_path})
self.next_task_id += 1 | ['def', 'prepare_shell_data', '(', 'self', ',', 'shells', ',', 'key', ',', 'entry', ')', ':', 'if', 'self', '.', 'can_process_shell', '(', 'entry', ')', ':', 'if', 'key', 'in', '[', "'python'", ']', ':', 'entry', '[', "'type'", ']', '=', 'key', 'if', "'with'", 'in', 'entry', 'and', 'isinstance', '(', 'entry', '[', "'with'", ']', ',', 'str', ')', ':', 'rendered_with', '=', 'ast', '.', 'literal_eval', '(', 'render', '(', 'entry', '[', "'with'", ']', ',', 'variables', '=', 'self', '.', 'pipeline', '.', 'variables', ',', 'model', '=', 'self', '.', 'pipeline', '.', 'model', ',', 'env', '=', 'self', '.', 'get_merged_env', '(', 'include_os', '=', 'True', ')', ')', ')', 'elif', "'with'", 'in', 'entry', ':', 'rendered_with', '=', 'entry', '[', "'with'", ']', 'else', ':', 'rendered_with', '=', '[', "''", ']', 'for', 'item', 'in', 'rendered_with', ':', 'shells', '.', 'append', '(', '{', "'id'", ':', 'self', '.', 'next_task_id', ',', "'creator'", ':', 'key', ',', "'entry'", ':', 'entry', ',', "'model'", ':', 'self', '.', 'pipeline', '.', 'model', ',', "'env'", ':', 'self', '.', 'get_merged_env', '(', ')', ',', "'item'", ':', 'item', ',', "'dry_run'", ':', 'self', '.', 'pipeline', '.', 'options', '.', 'dry_run', ',', "'debug'", ':', 'self', '.', 'pipeline', '.', 'options', '.', 'debug', ',', "'strict'", ':', 'self', '.', 'pipeline', '.', 'options', '.', 'strict', ',', "'variables'", ':', 'self', '.', 'pipeline', '.', 'variables', ',', "'temporary_scripts_path'", ':', 'self', '.', 'pipeline', '.', 'options', '.', 'temporary_scripts_path', '}', ')', 'self', '.', 'next_task_id', '+=', '1'] | Prepare one shell or docker task. | ['Prepare', 'one', 'shell', 'or', 'docker', 'task', '.'] | train | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L98-L127 |
9,819 | estnltk/estnltk | estnltk/wordnet/wn.py | Synset.lowest_common_hypernyms | python | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L800-L827 |

def lowest_common_hypernyms(self, target_synset):
"""Returns the common hypernyms of the synset and the target synset, which are furthest from the closest roots.
Parameters
----------
target_synset : Synset
Synset with which the common hypernyms are sought.
Returns
-------
list of Synsets
Common synsets which are the furthest from the closest roots.
"""
self_hypernyms = self._recursive_hypernyms(set())
other_hypernyms = target_synset._recursive_hypernyms(set())
common_hypernyms = self_hypernyms.intersection(other_hypernyms)
annot_common_hypernyms = [(hypernym, hypernym._min_depth()) for hypernym in common_hypernyms]
        annot_common_hypernyms.sort(key=lambda annot_hypernym: annot_hypernym[1], reverse=True)
        max_depth = annot_common_hypernyms[0][1] if len(annot_common_hypernyms) > 0 else None
        if max_depth is not None:
return [annot_common_hypernym[0] for annot_common_hypernym in annot_common_hypernyms if annot_common_hypernym[1] == max_depth]
else:
return None | python | def lowest_common_hypernyms(self,target_synset):
"""Returns the common hypernyms of the synset and the target synset, which are furthest from the closest roots.
Parameters
----------
target_synset : Synset
Synset with which the common hypernyms are sought.
Returns
-------
list of Synsets
Common synsets which are the furthest from the closest roots.
"""
self_hypernyms = self._recursive_hypernyms(set())
other_hypernyms = target_synset._recursive_hypernyms(set())
common_hypernyms = self_hypernyms.intersection(other_hypernyms)
annot_common_hypernyms = [(hypernym, hypernym._min_depth()) for hypernym in common_hypernyms]
        annot_common_hypernyms.sort(key=lambda annot_hypernym: annot_hypernym[1], reverse=True)
        max_depth = annot_common_hypernyms[0][1] if len(annot_common_hypernyms) > 0 else None
        if max_depth is not None:
return [annot_common_hypernym[0] for annot_common_hypernym in annot_common_hypernyms if annot_common_hypernym[1] == max_depth]
else:
return None | ['def', 'lowest_common_hypernyms', '(', 'self', ',', 'target_synset', ')', ':', 'self_hypernyms', '=', 'self', '.', '_recursive_hypernyms', '(', 'set', '(', ')', ')', 'other_hypernyms', '=', 'target_synset', '.', '_recursive_hypernyms', '(', 'set', '(', ')', ')', 'common_hypernyms', '=', 'self_hypernyms', '.', 'intersection', '(', 'other_hypernyms', ')', 'annot_common_hypernyms', '=', '[', '(', 'hypernym', ',', 'hypernym', '.', '_min_depth', '(', ')', ')', 'for', 'hypernym', 'in', 'common_hypernyms', ']', 'annot_common_hypernyms', '.', 'sort', '(', 'key', '=', 'lambda', 'annot_hypernym', ':', 'annot_hypernym', '[', '1', ']', ',', 'reverse', '=', 'True', ')', 'max_depth', '=', 'annot_common_hypernyms', '[', '0', ']', '[', '1', ']', 'if', 'len', '(', 'annot_common_hypernyms', ')', '>', '0', 'else', 'None', 'if', 'max_depth', '!=', 'None', ':', 'return', '[', 'annot_common_hypernym', '[', '0', ']', 'for', 'annot_common_hypernym', 'in', 'annot_common_hypernyms', 'if', 'annot_common_hypernym', '[', '1', ']', '==', 'max_depth', ']', 'else', ':', 'return', 'None'] | Returns the common hypernyms of the synset and the target synset, which are furthest from the closest roots.
Parameters
----------
target_synset : Synset
Synset with which the common hypernyms are sought.
Returns
-------
list of Synsets
Common synsets which are the furthest from the closest roots. | ['Returns', 'the', 'common', 'hypernyms', 'of', 'the', 'synset', 'and', 'the', 'target', 'synset', 'which', 'are', 'furthest', 'from', 'the', 'closest', 'roots', '.', 'Parameters', '----------', 'target_synset', ':', 'Synset', 'Synset', 'with', 'which', 'the', 'common', 'hypernyms', 'are', 'sought', '.', 'Returns', '-------', 'list', 'of', 'Synsets', 'Common', 'synsets', 'which', 'are', 'the', 'furthest', 'from', 'the', 'closest', 'roots', '.'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L800-L827 |
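A minimal usage sketch for `lowest_common_hypernyms`. The lemmas and the `wn.synsets` lookup follow the estnltk wordnet conventions, but the exact keys returned depend on the installed Estonian WordNet data, so treat the values as illustrative.

```python
from estnltk.wordnet import wn

# 'koer' (dog) and 'kass' (cat); the first returned sense of each is used here.
dog = wn.synsets("koer")[0]
cat = wn.synsets("kass")[0]

# Deepest shared hypernyms of the two synsets, or None if they share no ancestor.
print(dog.lowest_common_hypernyms(cat))
```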
9,820 | yehonadav/threaders | threaders/threaders.py | get_first_result | def get_first_result(threads):
""" this blocks, waiting for the first result that returns from a thread
:type threads: list[Thread]
"""
while True:
for thread in threads:
if not thread.is_alive():
return thread.queue.get() | python | def get_first_result(threads):
""" this blocks, waiting for the first result that returns from a thread
:type threads: list[Thread]
"""
while True:
for thread in threads:
if not thread.is_alive():
return thread.queue.get() | ['def', 'get_first_result', '(', 'threads', ')', ':', 'while', 'True', ':', 'for', 'thread', 'in', 'threads', ':', 'if', 'not', 'thread', '.', 'is_alive', '(', ')', ':', 'return', 'thread', '.', 'queue', '.', 'get', '(', ')'] | this blocks, waiting for the first result that returns from a thread
:type threads: list[Thread] | ['this', 'blocks', 'waiting', 'for', 'the', 'first', 'result', 'that', 'returns', 'from', 'a', 'thread', ':', 'type', 'threads', ':', 'list', '[', 'Thread', ']'] | train | https://github.com/yehonadav/threaders/blob/70302c4372e9b951435748abf4146a426005fc8a/threaders/threaders.py#L144-L151 |
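A sketch of racing several workers with `get_first_result`. It assumes `threaders.Thread` accepts a target like `threading.Thread` and stores the target's return value on `thread.queue`, which is what the helper above relies on; both points are assumptions, not verified API.

```python
import random
import time
from threaders import threaders

def worker(n):
    time.sleep(random.random())
    return "worker %d finished" % n

# Assumption: threaders.Thread takes target/args and exposes a .queue
# that ends up holding the target's return value.
threads = [threaders.Thread(target=worker, args=(i,)) for i in range(5)]
for t in threads:
    t.start()

print(threaders.get_first_result(threads))  # result of whichever finishes first
```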
9,821 | GoogleCloudPlatform/appengine-pipelines | python/src/pipeline/pipeline.py | _get_internal_slot | def _get_internal_slot(slot_key=None,
filler_pipeline_key=None,
slot_dict=None):
"""Gets information about a _SlotRecord for display in UI.
Args:
slot_key: The db.Key of the slot to fetch.
filler_pipeline_key: In the case the slot has not yet been filled, assume
that the given db.Key (for a _PipelineRecord) will be the filler of
the slot in the future.
slot_dict: The slot JSON dictionary.
Returns:
Dictionary with the keys:
status: Slot status: 'filled' or 'waiting'
fillTimeMs: Time in milliseconds since the epoch of when it was filled.
value: The current value of the slot, which is a slot's JSON dictionary.
fillerPipelineId: The pipeline ID of what stage has or should fill
this slot.
Raises:
PipelineStatusError if any input is bad.
"""
if slot_dict is None:
slot_dict = {}
slot_record = slot_dict.get(slot_key)
if slot_record is None:
raise PipelineStatusError(
'Could not find data for output slot key "%s".' % slot_key)
output = {}
if slot_record.status == _SlotRecord.FILLED:
output['status'] = 'filled'
output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time)
output['value'] = slot_record.value
filler_pipeline_key = (
_SlotRecord.filler.get_value_for_datastore(slot_record))
else:
output['status'] = 'waiting'
if filler_pipeline_key:
output['fillerPipelineId'] = filler_pipeline_key.name()
return output | python | def _get_internal_slot(slot_key=None,
filler_pipeline_key=None,
slot_dict=None):
"""Gets information about a _SlotRecord for display in UI.
Args:
slot_key: The db.Key of the slot to fetch.
filler_pipeline_key: In the case the slot has not yet been filled, assume
that the given db.Key (for a _PipelineRecord) will be the filler of
the slot in the future.
slot_dict: The slot JSON dictionary.
Returns:
Dictionary with the keys:
status: Slot status: 'filled' or 'waiting'
fillTimeMs: Time in milliseconds since the epoch of when it was filled.
value: The current value of the slot, which is a slot's JSON dictionary.
fillerPipelineId: The pipeline ID of what stage has or should fill
this slot.
Raises:
PipelineStatusError if any input is bad.
"""
if slot_dict is None:
slot_dict = {}
slot_record = slot_dict.get(slot_key)
if slot_record is None:
raise PipelineStatusError(
'Could not find data for output slot key "%s".' % slot_key)
output = {}
if slot_record.status == _SlotRecord.FILLED:
output['status'] = 'filled'
output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time)
output['value'] = slot_record.value
filler_pipeline_key = (
_SlotRecord.filler.get_value_for_datastore(slot_record))
else:
output['status'] = 'waiting'
if filler_pipeline_key:
output['fillerPipelineId'] = filler_pipeline_key.name()
return output | ['def', '_get_internal_slot', '(', 'slot_key', '=', 'None', ',', 'filler_pipeline_key', '=', 'None', ',', 'slot_dict', '=', 'None', ')', ':', 'if', 'slot_dict', 'is', 'None', ':', 'slot_dict', '=', '{', '}', 'slot_record', '=', 'slot_dict', '.', 'get', '(', 'slot_key', ')', 'if', 'slot_record', 'is', 'None', ':', 'raise', 'PipelineStatusError', '(', '\'Could not find data for output slot key "%s".\'', '%', 'slot_key', ')', 'output', '=', '{', '}', 'if', 'slot_record', '.', 'status', '==', '_SlotRecord', '.', 'FILLED', ':', 'output', '[', "'status'", ']', '=', "'filled'", 'output', '[', "'fillTimeMs'", ']', '=', '_get_timestamp_ms', '(', 'slot_record', '.', 'fill_time', ')', 'output', '[', "'value'", ']', '=', 'slot_record', '.', 'value', 'filler_pipeline_key', '=', '(', '_SlotRecord', '.', 'filler', '.', 'get_value_for_datastore', '(', 'slot_record', ')', ')', 'else', ':', 'output', '[', "'status'", ']', '=', "'waiting'", 'if', 'filler_pipeline_key', ':', 'output', '[', "'fillerPipelineId'", ']', '=', 'filler_pipeline_key', '.', 'name', '(', ')', 'return', 'output'] | Gets information about a _SlotRecord for display in UI.
Args:
slot_key: The db.Key of the slot to fetch.
filler_pipeline_key: In the case the slot has not yet been filled, assume
that the given db.Key (for a _PipelineRecord) will be the filler of
the slot in the future.
slot_dict: The slot JSON dictionary.
Returns:
Dictionary with the keys:
status: Slot status: 'filled' or 'waiting'
fillTimeMs: Time in milliseconds since the epoch of when it was filled.
value: The current value of the slot, which is a slot's JSON dictionary.
fillerPipelineId: The pipeline ID of what stage has or should fill
this slot.
Raises:
PipelineStatusError if any input is bad. | ['Gets', 'information', 'about', 'a', '_SlotRecord', 'for', 'display', 'in', 'UI', '.'] | train | https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L3059-L3103 |
9,822 | emc-openstack/storops | storops/unity/resource/remote_system.py | UnityRemoteSystem.verify | def verify(self, connection_type=None):
"""
        Verifies and updates the remote system settings.
:param connection_type: same as the one in `create` method.
"""
req_body = self._cli.make_body(connectionType=connection_type)
resp = self.action('verify', **req_body)
resp.raise_if_err()
return resp | python | def verify(self, connection_type=None):
"""
        Verifies and updates the remote system settings.
:param connection_type: same as the one in `create` method.
"""
req_body = self._cli.make_body(connectionType=connection_type)
resp = self.action('verify', **req_body)
resp.raise_if_err()
        return resp | ['def', 'verify', '(', 'self', ',', 'connection_type', '=', 'None', ')', ':', 'req_body', '=', 'self', '.', '_cli', '.', 'make_body', '(', 'connectionType', '=', 'connection_type', ')', 'resp', '=', 'self', '.', 'action', '(', "'verify'", ',', '*', '*', 'req_body', ')', 'resp', '.', 'raise_if_err', '(', ')', 'return', 'resp'] | Verifies and updates the remote system settings.
        :param connection_type: same as the one in `create` method. | ['Verifies', 'and', 'updates', 'the', 'remote', 'system', 'settings', '.'] | train | https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/remote_system.py#L77-L87 |
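A sketch against a live Unity array. The address and credentials are placeholders, and `get_remote_system` is an assumed lookup helper rather than a verified call.

```python
from storops import UnitySystem

unity = UnitySystem('10.0.0.1', 'admin', 'password')  # placeholder credentials
remote = unity.get_remote_system(name='peer-unity')   # assumed lookup helper

# Re-validate the replication connection; raises on a non-zero response.
remote.verify()
```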
9,823 | dlintott/gns3-converter | gns3converter/main.py | make_qemu_dirs | def make_qemu_dirs(max_qemu_id, output_dir, topology_name):
"""
Create Qemu VM working directories if required
:param int max_qemu_id: Number of directories to create
:param str output_dir: Output directory
:param str topology_name: Topology name
"""
if max_qemu_id is not None:
for i in range(1, max_qemu_id + 1):
qemu_dir = os.path.join(output_dir, topology_name + '-files',
'qemu', 'vm-%s' % i)
os.makedirs(qemu_dir) | python | def make_qemu_dirs(max_qemu_id, output_dir, topology_name):
"""
Create Qemu VM working directories if required
:param int max_qemu_id: Number of directories to create
:param str output_dir: Output directory
:param str topology_name: Topology name
"""
if max_qemu_id is not None:
for i in range(1, max_qemu_id + 1):
qemu_dir = os.path.join(output_dir, topology_name + '-files',
'qemu', 'vm-%s' % i)
os.makedirs(qemu_dir) | ['def', 'make_qemu_dirs', '(', 'max_qemu_id', ',', 'output_dir', ',', 'topology_name', ')', ':', 'if', 'max_qemu_id', 'is', 'not', 'None', ':', 'for', 'i', 'in', 'range', '(', '1', ',', 'max_qemu_id', '+', '1', ')', ':', 'qemu_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'output_dir', ',', 'topology_name', '+', "'-files'", ',', "'qemu'", ',', "'vm-%s'", '%', 'i', ')', 'os', '.', 'makedirs', '(', 'qemu_dir', ')'] | Create Qemu VM working directories if required
:param int max_qemu_id: Number of directories to create
:param str output_dir: Output directory
:param str topology_name: Topology name | ['Create', 'Qemu', 'VM', 'working', 'directories', 'if', 'required'] | train | https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/main.py#L413-L425 |
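A short sketch of the directory layout this produces; paths are examples.

```python
import tempfile
from gns3converter.main import make_qemu_dirs

out = tempfile.mkdtemp()
make_qemu_dirs(3, out, 'mytopo')     # creates <out>/mytopo-files/qemu/vm-1 ... vm-3
make_qemu_dirs(None, out, 'mytopo')  # no-op when max_qemu_id is None
```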
9,824 | AlecAivazis/graphql-over-kafka | nautilus/conventions/api.py | update_mutation_inputs | def update_mutation_inputs(service):
"""
Args:
service : The service being updated by the mutation
Returns:
        (list) : a list of all of the fields available for the service. Pk
is a required field in order to filter the results
"""
# grab the default list of field summaries
inputs = _service_mutation_summaries(service)
# visit each field
for field in inputs:
# if we're looking at the id field
if field['name'] == 'id':
# make sure its required
field['required'] = True
# but no other field
else:
# is required
field['required'] = False
# return the final list
return inputs | python | def update_mutation_inputs(service):
"""
Args:
service : The service being updated by the mutation
Returns:
        (list) : a list of all of the fields available for the service. Pk
is a required field in order to filter the results
"""
# grab the default list of field summaries
inputs = _service_mutation_summaries(service)
# visit each field
for field in inputs:
# if we're looking at the id field
if field['name'] == 'id':
# make sure its required
field['required'] = True
# but no other field
else:
# is required
field['required'] = False
# return the final list
return inputs | ['def', 'update_mutation_inputs', '(', 'service', ')', ':', '# grab the default list of field summaries', 'inputs', '=', '_service_mutation_summaries', '(', 'service', ')', '# visit each field', 'for', 'field', 'in', 'inputs', ':', "# if we're looking at the id field", 'if', 'field', '[', "'name'", ']', '==', "'id'", ':', '# make sure its required', 'field', '[', "'required'", ']', '=', 'True', '# but no other field', 'else', ':', '# is required', 'field', '[', "'required'", ']', '=', 'False', '# return the final list', 'return', 'inputs'] | Args:
service : The service being updated by the mutation
Returns:
        (list) : a list of all of the fields available for the service. Pk
        is a required field in order to filter the results | ['Args', ':', 'service', ':', 'The', 'service', 'being', 'updated', 'by', 'the', 'mutation', 'Returns', ':', '(', 'list', ')', ':', 'a', 'list', 'of', 'all', 'of', 'the', 'fields', 'available', 'for', 'the', 'service', '.', 'Pk', 'is', 'a', 'required', 'field', 'in', 'order', 'to', 'filter', 'the', 'results'] | train | https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/conventions/api.py#L57-L80 |
9,825 | tempodb/tempodb-python | tempodb/protocol/objects.py | SeriesSummary.to_dictionary | def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
d = {'start': self.start.isoformat(),
'end': self.end.isoformat(),
'tz': self.tz,
'summary': self.summary.to_dictionary(),
'series': self.series.to_dictionary()
}
return d | python | def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
d = {'start': self.start.isoformat(),
'end': self.end.isoformat(),
'tz': self.tz,
'summary': self.summary.to_dictionary(),
'series': self.series.to_dictionary()
}
return d | ['def', 'to_dictionary', '(', 'self', ')', ':', 'd', '=', '{', "'start'", ':', 'self', '.', 'start', '.', 'isoformat', '(', ')', ',', "'end'", ':', 'self', '.', 'end', '.', 'isoformat', '(', ')', ',', "'tz'", ':', 'self', '.', 'tz', ',', "'summary'", ':', 'self', '.', 'summary', '.', 'to_dictionary', '(', ')', ',', "'series'", ':', 'self', '.', 'series', '.', 'to_dictionary', '(', ')', '}', 'return', 'd'] | Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string. | ['Serialize', 'an', 'object', 'into', 'dictionary', 'form', '.', 'Useful', 'if', 'you', 'have', 'to', 'serialize', 'an', 'array', 'of', 'objects', 'into', 'JSON', '.', 'Otherwise', 'if', 'you', 'call', 'the', ':', 'meth', ':', 'to_json', 'method', 'on', 'each', 'object', 'in', 'the', 'list', 'and', 'then', 'try', 'to', 'dump', 'the', 'array', 'you', 'end', 'up', 'with', 'an', 'array', 'with', 'one', 'string', '.'] | train | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/protocol/objects.py#L229-L241 |
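This is exactly the batching case the docstring warns about; a small helper makes the intent concrete.

```python
import json

def summaries_to_json(summaries):
    """Serialise a list of SeriesSummary objects in one pass."""
    # Calling to_dictionary() per item avoids the double-encoding you would
    # get from json.dumps() over a list of to_json() strings.
    return json.dumps([s.to_dictionary() for s in summaries])
```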
9,826 | kejbaly2/metrique | metrique/utils.py | clear_stale_pids | def clear_stale_pids(pids, pid_dir='/tmp', prefix='', multi=False):
'check for and remove any pids which have no corresponding process'
if isinstance(pids, (int, float, long)):
pids = [pids]
pids = str2list(pids, map_=unicode)
procs = map(unicode, os.listdir('/proc'))
running = [pid for pid in pids if pid in procs]
logger.warn(
"Found %s pids running: %s" % (len(running),
running))
prefix = prefix.rstrip('.') if prefix else None
for pid in pids:
if prefix:
_prefix = prefix
else:
_prefix = unicode(pid)
# remove non-running procs
if pid in running:
continue
if multi:
pid_file = '%s%s.pid' % (_prefix, pid)
else:
pid_file = '%s.pid' % (_prefix)
path = os.path.join(pid_dir, pid_file)
if os.path.exists(path):
logger.debug("Removing pidfile: %s" % path)
try:
remove_file(path)
except OSError as e:
logger.warn(e)
return running | python | def clear_stale_pids(pids, pid_dir='/tmp', prefix='', multi=False):
'check for and remove any pids which have no corresponding process'
if isinstance(pids, (int, float, long)):
pids = [pids]
pids = str2list(pids, map_=unicode)
procs = map(unicode, os.listdir('/proc'))
running = [pid for pid in pids if pid in procs]
logger.warn(
"Found %s pids running: %s" % (len(running),
running))
prefix = prefix.rstrip('.') if prefix else None
for pid in pids:
if prefix:
_prefix = prefix
else:
_prefix = unicode(pid)
# remove non-running procs
if pid in running:
continue
if multi:
pid_file = '%s%s.pid' % (_prefix, pid)
else:
pid_file = '%s.pid' % (_prefix)
path = os.path.join(pid_dir, pid_file)
if os.path.exists(path):
logger.debug("Removing pidfile: %s" % path)
try:
remove_file(path)
except OSError as e:
logger.warn(e)
return running | ['def', 'clear_stale_pids', '(', 'pids', ',', 'pid_dir', '=', "'/tmp'", ',', 'prefix', '=', "''", ',', 'multi', '=', 'False', ')', ':', 'if', 'isinstance', '(', 'pids', ',', '(', 'int', ',', 'float', ',', 'long', ')', ')', ':', 'pids', '=', '[', 'pids', ']', 'pids', '=', 'str2list', '(', 'pids', ',', 'map_', '=', 'unicode', ')', 'procs', '=', 'map', '(', 'unicode', ',', 'os', '.', 'listdir', '(', "'/proc'", ')', ')', 'running', '=', '[', 'pid', 'for', 'pid', 'in', 'pids', 'if', 'pid', 'in', 'procs', ']', 'logger', '.', 'warn', '(', '"Found %s pids running: %s"', '%', '(', 'len', '(', 'running', ')', ',', 'running', ')', ')', 'prefix', '=', 'prefix', '.', 'rstrip', '(', "'.'", ')', 'if', 'prefix', 'else', 'None', 'for', 'pid', 'in', 'pids', ':', 'if', 'prefix', ':', '_prefix', '=', 'prefix', 'else', ':', '_prefix', '=', 'unicode', '(', 'pid', ')', '# remove non-running procs', 'if', 'pid', 'in', 'running', ':', 'continue', 'if', 'multi', ':', 'pid_file', '=', "'%s%s.pid'", '%', '(', '_prefix', ',', 'pid', ')', 'else', ':', 'pid_file', '=', "'%s.pid'", '%', '(', '_prefix', ')', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'pid_dir', ',', 'pid_file', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'logger', '.', 'debug', '(', '"Removing pidfile: %s"', '%', 'path', ')', 'try', ':', 'remove_file', '(', 'path', ')', 'except', 'OSError', 'as', 'e', ':', 'logger', '.', 'warn', '(', 'e', ')', 'return', 'running'] | check for and remove any pids which have no corresponding process | ['check', 'for', 'and', 'remove', 'any', 'pids', 'which', 'have', 'no', 'corresponding', 'process'] | train | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L197-L227 |
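A usage sketch with made-up pids: any pid no longer listed under `/proc` has its pidfile removed, and the pids still running come back.

```python
from metrique.utils import clear_stale_pids

# With multi=True and prefix='worker', a dead pid 5678 would have
# /tmp/worker5678.pid deleted; live pids are returned unchanged.
still_running = clear_stale_pids([1234, 5678], pid_dir='/tmp',
                                 prefix='worker', multi=True)
print(still_running)
```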
9,827 | gabstopper/smc-python | smc/core/engine_vss.py | VSSContainer.remove_security_group | def remove_security_group(self, name):
"""
Remove a security group from container
"""
for group in self.security_groups:
if group.isc_name == name:
group.delete() | python | def remove_security_group(self, name):
"""
Remove a security group from container
"""
for group in self.security_groups:
if group.isc_name == name:
group.delete() | ['def', 'remove_security_group', '(', 'self', ',', 'name', ')', ':', 'for', 'group', 'in', 'self', '.', 'security_groups', ':', 'if', 'group', '.', 'isc_name', '==', 'name', ':', 'group', '.', 'delete', '(', ')'] | Remove a security group from container | ['Remove', 'a', 'security', 'group', 'from', 'container'] | train | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/engine_vss.py#L111-L117 |
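A sketch assuming an authenticated SMC session and the usual smc-python lookup-by-name constructor; the container and group names are placeholders.

```python
from smc.core.engine_vss import VSSContainer

container = VSSContainer('vss-container-1')  # assumed lookup-by-name pattern
container.remove_security_group('web-tier')  # deletes every group whose isc_name matches
```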
9,828 | ktbyers/netmiko | netmiko/cisco/cisco_ios.py | InLineTransfer.config_md5 | def config_md5(self, source_config):
"""Compute MD5 hash of file."""
file_contents = source_config + "\n" # Cisco IOS automatically adds this
file_contents = file_contents.encode("UTF-8")
return hashlib.md5(file_contents).hexdigest() | python | def config_md5(self, source_config):
"""Compute MD5 hash of file."""
file_contents = source_config + "\n" # Cisco IOS automatically adds this
file_contents = file_contents.encode("UTF-8")
return hashlib.md5(file_contents).hexdigest() | ['def', 'config_md5', '(', 'self', ',', 'source_config', ')', ':', 'file_contents', '=', 'source_config', '+', '"\\n"', '# Cisco IOS automatically adds this', 'file_contents', '=', 'file_contents', '.', 'encode', '(', '"UTF-8"', ')', 'return', 'hashlib', '.', 'md5', '(', 'file_contents', ')', '.', 'hexdigest', '(', ')'] | Compute MD5 hash of file. | ['Compute', 'MD5', 'hash', 'of', 'file', '.'] | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/cisco/cisco_ios.py#L174-L178 |
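The hashing step stands alone well, so here it is restated without the transfer-class plumbing; the trailing newline is the detail that makes the digest match what IOS reports.

```python
import hashlib

def ios_config_md5(source_config):
    # Mirror the method above: IOS appends a trailing newline before hashing,
    # so the same newline must be added here for the digests to match.
    return hashlib.md5((source_config + "\n").encode("UTF-8")).hexdigest()

print(ios_config_md5("hostname R1"))
```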
9,829 | modlinltd/django-advanced-filters | advanced_filters/forms.py | AdvancedFilterQueryForm._build_query_dict | def _build_query_dict(self, formdata=None):
"""
Take submitted data from form and create a query dict to be
used in a Q object (or filter)
"""
if self.is_valid() and formdata is None:
formdata = self.cleaned_data
key = "{field}__{operator}".format(**formdata)
if formdata['operator'] == "isnull":
return {key: None}
elif formdata['operator'] == "istrue":
return {formdata['field']: True}
elif formdata['operator'] == "isfalse":
return {formdata['field']: False}
return {key: formdata['value']} | python | def _build_query_dict(self, formdata=None):
"""
Take submitted data from form and create a query dict to be
used in a Q object (or filter)
"""
if self.is_valid() and formdata is None:
formdata = self.cleaned_data
key = "{field}__{operator}".format(**formdata)
if formdata['operator'] == "isnull":
return {key: None}
elif formdata['operator'] == "istrue":
return {formdata['field']: True}
elif formdata['operator'] == "isfalse":
return {formdata['field']: False}
return {key: formdata['value']} | ['def', '_build_query_dict', '(', 'self', ',', 'formdata', '=', 'None', ')', ':', 'if', 'self', '.', 'is_valid', '(', ')', 'and', 'formdata', 'is', 'None', ':', 'formdata', '=', 'self', '.', 'cleaned_data', 'key', '=', '"{field}__{operator}"', '.', 'format', '(', '*', '*', 'formdata', ')', 'if', 'formdata', '[', "'operator'", ']', '==', '"isnull"', ':', 'return', '{', 'key', ':', 'None', '}', 'elif', 'formdata', '[', "'operator'", ']', '==', '"istrue"', ':', 'return', '{', 'formdata', '[', "'field'", ']', ':', 'True', '}', 'elif', 'formdata', '[', "'operator'", ']', '==', '"isfalse"', ':', 'return', '{', 'formdata', '[', "'field'", ']', ':', 'False', '}', 'return', '{', 'key', ':', 'formdata', '[', "'value'", ']', '}'] | Take submitted data from form and create a query dict to be
used in a Q object (or filter) | ['Take', 'submitted', 'data', 'from', 'form', 'and', 'create', 'a', 'query', 'dict', 'to', 'be', 'used', 'in', 'a', 'Q', 'object', '(', 'or', 'filter', ')'] | train | https://github.com/modlinltd/django-advanced-filters/blob/ba51e6946d1652796a82b2b95cceffbe1190a227/advanced_filters/forms.py#L86-L100 |
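To see where the returned dict ends up, here is the same key construction fed into a Django `Q` object; the form data is invented for illustration.

```python
from django.db.models import Q

formdata = {'field': 'last_name', 'operator': 'icontains', 'value': 'smith'}
query = {'{field}__{operator}'.format(**formdata): formdata['value']}
print(Q(**query))  # <Q: (AND: ('last_name__icontains', 'smith'))>
```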
9,830 | huyingxi/Synonyms | synonyms/utils.py | deaccent | def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result) | python | def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result) | ['def', 'deaccent', '(', 'text', ')', ':', 'if', 'not', 'isinstance', '(', 'text', ',', 'unicode', ')', ':', '# assume utf8 for byte strings, use default (strict) error handling', 'text', '=', 'text', '.', 'decode', '(', "'utf8'", ')', 'norm', '=', 'unicodedata', '.', 'normalize', '(', '"NFD"', ',', 'text', ')', 'result', '=', 'u', '(', "''", ')', '.', 'join', '(', 'ch', 'for', 'ch', 'in', 'norm', 'if', 'unicodedata', '.', 'category', '(', 'ch', ')', '!=', "'Mn'", ')', 'return', 'unicodedata', '.', 'normalize', '(', '"NFC"', ',', 'result', ')'] | Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek' | ['Remove', 'accentuation', 'from', 'the', 'given', 'string', '.', 'Input', 'text', 'is', 'either', 'a', 'unicode', 'string', 'or', 'utf8', 'encoded', 'bytestring', '.'] | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L140-L155 |
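The doctest above already shows the behaviour; the same call as a plain script:

```python
from synonyms.utils import deaccent

print(deaccent(u"Šéf chomutovských komunistů dostal poštou bílý prášek"))
# Sef chomutovskych komunistu dostal postou bily prasek
```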
9,831 | ellmetha/django-machina | machina/apps/forum_moderation/views.py | TopicLockView.get_success_url | def get_success_url(self):
""" Returns the success URL to redirect the user to. """
return reverse(
'forum_conversation:topic',
kwargs={
'forum_slug': self.object.forum.slug,
'forum_pk': self.object.forum.pk,
'slug': self.object.slug,
'pk': self.object.pk,
},
) | python | def get_success_url(self):
""" Returns the success URL to redirect the user to. """
return reverse(
'forum_conversation:topic',
kwargs={
'forum_slug': self.object.forum.slug,
'forum_pk': self.object.forum.pk,
'slug': self.object.slug,
'pk': self.object.pk,
},
) | ['def', 'get_success_url', '(', 'self', ')', ':', 'return', 'reverse', '(', "'forum_conversation:topic'", ',', 'kwargs', '=', '{', "'forum_slug'", ':', 'self', '.', 'object', '.', 'forum', '.', 'slug', ',', "'forum_pk'", ':', 'self', '.', 'object', '.', 'forum', '.', 'pk', ',', "'slug'", ':', 'self', '.', 'object', '.', 'slug', ',', "'pk'", ':', 'self', '.', 'object', '.', 'pk', ',', '}', ',', ')'] | Returns the success URL to redirect the user to. | ['Returns', 'the', 'success', 'URL', 'to', 'redirect', 'the', 'user', 'to', '.'] | train | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_moderation/views.py#L62-L72 |
9,832 | glitchassassin/lackey | lackey/PlatformManagerDarwin.py | PlatformManagerDarwin.getBitmapFromRect | def getBitmapFromRect(self, x, y, w, h):
""" Capture the specified area of the (virtual) screen. """
min_x, min_y, screen_width, screen_height = self._getVirtualScreenRect()
img = self._getVirtualScreenBitmap() # TODO
# Limit the coordinates to the virtual screen
# Then offset so 0,0 is the top left corner of the image
# (Top left of virtual screen could be negative)
x1 = min(max(min_x, x), min_x+screen_width) - min_x
y1 = min(max(min_y, y), min_y+screen_height) - min_y
x2 = min(max(min_x, x+w), min_x+screen_width) - min_x
y2 = min(max(min_y, y+h), min_y+screen_height) - min_y
return numpy.array(img.crop((x1, y1, x2, y2))) | python | def getBitmapFromRect(self, x, y, w, h):
""" Capture the specified area of the (virtual) screen. """
min_x, min_y, screen_width, screen_height = self._getVirtualScreenRect()
img = self._getVirtualScreenBitmap() # TODO
# Limit the coordinates to the virtual screen
# Then offset so 0,0 is the top left corner of the image
# (Top left of virtual screen could be negative)
x1 = min(max(min_x, x), min_x+screen_width) - min_x
y1 = min(max(min_y, y), min_y+screen_height) - min_y
x2 = min(max(min_x, x+w), min_x+screen_width) - min_x
y2 = min(max(min_y, y+h), min_y+screen_height) - min_y
return numpy.array(img.crop((x1, y1, x2, y2))) | ['def', 'getBitmapFromRect', '(', 'self', ',', 'x', ',', 'y', ',', 'w', ',', 'h', ')', ':', 'min_x', ',', 'min_y', ',', 'screen_width', ',', 'screen_height', '=', 'self', '.', '_getVirtualScreenRect', '(', ')', 'img', '=', 'self', '.', '_getVirtualScreenBitmap', '(', ')', '# TODO', '# Limit the coordinates to the virtual screen', '# Then offset so 0,0 is the top left corner of the image', '# (Top left of virtual screen could be negative)', 'x1', '=', 'min', '(', 'max', '(', 'min_x', ',', 'x', ')', ',', 'min_x', '+', 'screen_width', ')', '-', 'min_x', 'y1', '=', 'min', '(', 'max', '(', 'min_y', ',', 'y', ')', ',', 'min_y', '+', 'screen_height', ')', '-', 'min_y', 'x2', '=', 'min', '(', 'max', '(', 'min_x', ',', 'x', '+', 'w', ')', ',', 'min_x', '+', 'screen_width', ')', '-', 'min_x', 'y2', '=', 'min', '(', 'max', '(', 'min_y', ',', 'y', '+', 'h', ')', ',', 'min_y', '+', 'screen_height', ')', '-', 'min_y', 'return', 'numpy', '.', 'array', '(', 'img', '.', 'crop', '(', '(', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', ')', ')', ')'] | Capture the specified area of the (virtual) screen. | ['Capture', 'the', 'specified', 'area', 'of', 'the', '(', 'virtual', ')', 'screen', '.'] | train | https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L199-L210 |
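The clamping arithmetic is the subtle part, so here it is restated as a standalone function: the requested rectangle is clipped to the virtual screen and shifted so that (0, 0) is the image's top-left, which matters when a monitor sits left of the primary and the origin is negative.

```python
def clamp_rect(x, y, w, h, min_x, min_y, screen_w, screen_h):
    # Clip to the virtual screen, then offset so 0,0 is the image's top-left.
    x1 = min(max(min_x, x), min_x + screen_w) - min_x
    y1 = min(max(min_y, y), min_y + screen_h) - min_y
    x2 = min(max(min_x, x + w), min_x + screen_w) - min_x
    y2 = min(max(min_y, y + h), min_y + screen_h) - min_y
    return x1, y1, x2, y2

# Virtual screen spanning x in [-1920, 1920) across two 1920x1080 monitors:
print(clamp_rect(-100, 0, 3000, 1080, -1920, 0, 3840, 1080))  # (1820, 0, 3840, 1080)
```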
9,833 | NeuroML/pyNeuroML | pyneuroml/povray/NeuroML2ToPOVRay.py | process_args | def process_args():
"""
Parse command-line arguments.
"""
parser = argparse.ArgumentParser(description="A file for converting NeuroML v2 files into POVRay files for 3D rendering")
parser.add_argument('neuroml_file', type=str, metavar='<NeuroML file>',
help='NeuroML (version 2 beta 3+) file to be converted to PovRay format (XML or HDF5 format)')
parser.add_argument('-split',
action='store_true',
default=False,
help="If this is specified, generate separate pov files for cells & network. Default is false")
parser.add_argument('-background',
type=str,
metavar='<background colour>',
default=_WHITE,
help='Colour of background, e.g. <0,0,0,0.55>')
parser.add_argument('-movie',
action='store_true',
default=False,
help="If this is specified, generate a ini file for generating a sequence of frames for a movie of the 3D structure")
parser.add_argument('-inputs',
action='store_true',
default=False,
help="If this is specified, show the locations of (synaptic, current clamp, etc.) inputs into the cells of the network")
parser.add_argument('-conns',
action='store_true',
default=False,
help="If this is specified, show the connections present in the network with lines")
parser.add_argument('-conn_points',
action='store_true',
default=False,
help="If this is specified, show the end points of the connections present in the network")
parser.add_argument('-v',
action='store_true',
default=False,
help="Verbose output")
parser.add_argument('-frames',
type=int,
metavar='<frames>',
default=36,
help='Number of frames in movie')
parser.add_argument('-posx',
type=float,
metavar='<position offset x>',
default=0,
help='Offset position in x dir (0 is centre, 1 is top)')
parser.add_argument('-posy',
type=float,
metavar='<position offset y>',
default=0,
help='Offset position in y dir (0 is centre, 1 is top)')
parser.add_argument('-posz',
type=float,
metavar='<position offset z>',
default=0,
help='Offset position in z dir (0 is centre, 1 is top)')
parser.add_argument('-viewx',
type=float,
metavar='<view offset x>',
default=0,
help='Offset viewing point in x dir (0 is centre, 1 is top)')
parser.add_argument('-viewy',
type=float,
metavar='<view offset y>',
default=0,
help='Offset viewing point in y dir (0 is centre, 1 is top)')
parser.add_argument('-viewz',
type=float,
metavar='<view offset z>',
default=0,
help='Offset viewing point in z dir (0 is centre, 1 is top)')
parser.add_argument('-scalex',
type=float,
metavar='<scale position x>',
default=1,
help='Scale position from network in x dir')
parser.add_argument('-scaley',
type=float,
metavar='<scale position y>',
default=1.5,
help='Scale position from network in y dir')
parser.add_argument('-scalez',
type=float,
metavar='<scale position z>',
default=1,
help='Scale position from network in z dir')
parser.add_argument('-mindiam',
type=float,
metavar='<minimum diameter dendrites/axons>',
default=0,
help='Minimum diameter for dendrites/axons (to improve visualisations)')
parser.add_argument('-plane',
action='store_true',
default=False,
help="If this is specified, add a 2D plane below cell/network")
parser.add_argument('-segids',
action='store_true',
default=False,
help="Show segment ids")
return parser.parse_args() | python | def process_args():
"""
Parse command-line arguments.
"""
parser = argparse.ArgumentParser(description="A file for converting NeuroML v2 files into POVRay files for 3D rendering")
parser.add_argument('neuroml_file', type=str, metavar='<NeuroML file>',
help='NeuroML (version 2 beta 3+) file to be converted to PovRay format (XML or HDF5 format)')
parser.add_argument('-split',
action='store_true',
default=False,
help="If this is specified, generate separate pov files for cells & network. Default is false")
parser.add_argument('-background',
type=str,
metavar='<background colour>',
default=_WHITE,
help='Colour of background, e.g. <0,0,0,0.55>')
parser.add_argument('-movie',
action='store_true',
default=False,
help="If this is specified, generate a ini file for generating a sequence of frames for a movie of the 3D structure")
parser.add_argument('-inputs',
action='store_true',
default=False,
help="If this is specified, show the locations of (synaptic, current clamp, etc.) inputs into the cells of the network")
parser.add_argument('-conns',
action='store_true',
default=False,
help="If this is specified, show the connections present in the network with lines")
parser.add_argument('-conn_points',
action='store_true',
default=False,
help="If this is specified, show the end points of the connections present in the network")
parser.add_argument('-v',
action='store_true',
default=False,
help="Verbose output")
parser.add_argument('-frames',
type=int,
metavar='<frames>',
default=36,
help='Number of frames in movie')
parser.add_argument('-posx',
type=float,
metavar='<position offset x>',
default=0,
help='Offset position in x dir (0 is centre, 1 is top)')
parser.add_argument('-posy',
type=float,
metavar='<position offset y>',
default=0,
help='Offset position in y dir (0 is centre, 1 is top)')
parser.add_argument('-posz',
type=float,
metavar='<position offset z>',
default=0,
help='Offset position in z dir (0 is centre, 1 is top)')
parser.add_argument('-viewx',
type=float,
metavar='<view offset x>',
default=0,
help='Offset viewing point in x dir (0 is centre, 1 is top)')
parser.add_argument('-viewy',
type=float,
metavar='<view offset y>',
default=0,
help='Offset viewing point in y dir (0 is centre, 1 is top)')
parser.add_argument('-viewz',
type=float,
metavar='<view offset z>',
default=0,
help='Offset viewing point in z dir (0 is centre, 1 is top)')
parser.add_argument('-scalex',
type=float,
metavar='<scale position x>',
default=1,
help='Scale position from network in x dir')
parser.add_argument('-scaley',
type=float,
metavar='<scale position y>',
default=1.5,
help='Scale position from network in y dir')
parser.add_argument('-scalez',
type=float,
metavar='<scale position z>',
default=1,
help='Scale position from network in z dir')
parser.add_argument('-mindiam',
type=float,
metavar='<minimum diameter dendrites/axons>',
default=0,
help='Minimum diameter for dendrites/axons (to improve visualisations)')
parser.add_argument('-plane',
action='store_true',
default=False,
help="If this is specified, add a 2D plane below cell/network")
parser.add_argument('-segids',
action='store_true',
default=False,
help="Show segment ids")
return parser.parse_args() | ['def', 'process_args', '(', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', '"A file for converting NeuroML v2 files into POVRay files for 3D rendering"', ')', 'parser', '.', 'add_argument', '(', "'neuroml_file'", ',', 'type', '=', 'str', ',', 'metavar', '=', "'<NeuroML file>'", ',', 'help', '=', "'NeuroML (version 2 beta 3+) file to be converted to PovRay format (XML or HDF5 format)'", ')', 'parser', '.', 'add_argument', '(', "'-split'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"If this is specified, generate separate pov files for cells & network. Default is false"', ')', 'parser', '.', 'add_argument', '(', "'-background'", ',', 'type', '=', 'str', ',', 'metavar', '=', "'<background colour>'", ',', 'default', '=', '_WHITE', ',', 'help', '=', "'Colour of background, e.g. <0,0,0,0.55>'", ')', 'parser', '.', 'add_argument', '(', "'-movie'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"If this is specified, generate a ini file for generating a sequence of frames for a movie of the 3D structure"', ')', 'parser', '.', 'add_argument', '(', "'-inputs'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"If this is specified, show the locations of (synaptic, current clamp, etc.) inputs into the cells of the network"', ')', 'parser', '.', 'add_argument', '(', "'-conns'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"If this is specified, show the connections present in the network with lines"', ')', 'parser', '.', 'add_argument', '(', "'-conn_points'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"If this is specified, show the end points of the connections present in the network"', ')', 'parser', '.', 'add_argument', '(', "'-v'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"Verbose output"', ')', 'parser', '.', 'add_argument', '(', "'-frames'", ',', 'type', '=', 'int', ',', 'metavar', '=', "'<frames>'", ',', 'default', '=', '36', ',', 'help', '=', "'Number of frames in movie'", ')', 'parser', '.', 'add_argument', '(', "'-posx'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<position offset x>'", ',', 'default', '=', '0', ',', 'help', '=', "'Offset position in x dir (0 is centre, 1 is top)'", ')', 'parser', '.', 'add_argument', '(', "'-posy'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<position offset y>'", ',', 'default', '=', '0', ',', 'help', '=', "'Offset position in y dir (0 is centre, 1 is top)'", ')', 'parser', '.', 'add_argument', '(', "'-posz'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<position offset z>'", ',', 'default', '=', '0', ',', 'help', '=', "'Offset position in z dir (0 is centre, 1 is top)'", ')', 'parser', '.', 'add_argument', '(', "'-viewx'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<view offset x>'", ',', 'default', '=', '0', ',', 'help', '=', "'Offset viewing point in x dir (0 is centre, 1 is top)'", ')', 'parser', '.', 'add_argument', '(', "'-viewy'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<view offset y>'", ',', 'default', '=', '0', ',', 'help', '=', "'Offset viewing point in y dir (0 is centre, 1 is top)'", ')', 'parser', '.', 'add_argument', '(', "'-viewz'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<view offset z>'", ',', 'default', '=', '0', ',', 'help', '=', "'Offset viewing point in z dir (0 is centre, 1 is 
top)'", ')', 'parser', '.', 'add_argument', '(', "'-scalex'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<scale position x>'", ',', 'default', '=', '1', ',', 'help', '=', "'Scale position from network in x dir'", ')', 'parser', '.', 'add_argument', '(', "'-scaley'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<scale position y>'", ',', 'default', '=', '1.5', ',', 'help', '=', "'Scale position from network in y dir'", ')', 'parser', '.', 'add_argument', '(', "'-scalez'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<scale position z>'", ',', 'default', '=', '1', ',', 'help', '=', "'Scale position from network in z dir'", ')', 'parser', '.', 'add_argument', '(', "'-mindiam'", ',', 'type', '=', 'float', ',', 'metavar', '=', "'<minimum diameter dendrites/axons>'", ',', 'default', '=', '0', ',', 'help', '=', "'Minimum diameter for dendrites/axons (to improve visualisations)'", ')', 'parser', '.', 'add_argument', '(', "'-plane'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"If this is specified, add a 2D plane below cell/network"', ')', 'parser', '.', 'add_argument', '(', "'-segids'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', '"Show segment ids"', ')', 'return', 'parser', '.', 'parse_args', '(', ')'] | Parse command-line arguments. | ['Parse', 'command', '-', 'line', 'arguments', '.'] | train | https://github.com/NeuroML/pyNeuroML/blob/aeba2e3040b360bb26556f643cccbfb3dac3b8fb/pyneuroml/povray/NeuroML2ToPOVRay.py#L27-L143 |
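A sketch of driving the parser programmatically; the file name and flag values are examples. In normal use the module runs as a script, so `sys.argv` is already populated.

```python
import sys
from pyneuroml.povray.NeuroML2ToPOVRay import process_args

sys.argv = ['NeuroML2ToPOVRay.py', 'network.net.nml',
            '-movie', '-frames', '72', '-scaley', '2.0']
args = process_args()
print(args.neuroml_file, args.frames, args.scaley)  # network.net.nml 72 2.0
```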
9,834 | rigetti/grove | grove/tomography/state_tomography.py | state_tomography_programs | def state_tomography_programs(state_prep, qubits=None,
rotation_generator=tomography.default_rotations):
"""
Yield tomographic sequences that prepare a state with Quil program `state_prep` and then append
tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in
the program should be tomographically rotated.
:param Program state_prep: The program to prepare the state to be tomographed.
:param list|NoneType qubits: A list of Qubits or Numbers, to perform the tomography on. If
`None`, performs it on all in state_prep.
:param generator rotation_generator: A generator that yields tomography rotations to perform.
:return: Program for state tomography.
:rtype: Program
"""
if qubits is None:
qubits = state_prep.get_qubits()
for tomography_program in rotation_generator(*qubits):
state_tomography_program = Program(Pragma("PRESERVE_BLOCK"))
state_tomography_program.inst(state_prep)
state_tomography_program.inst(tomography_program)
state_tomography_program.inst(Pragma("END_PRESERVE_BLOCK"))
yield state_tomography_program | python | def state_tomography_programs(state_prep, qubits=None,
rotation_generator=tomography.default_rotations):
"""
Yield tomographic sequences that prepare a state with Quil program `state_prep` and then append
tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in
the program should be tomographically rotated.
:param Program state_prep: The program to prepare the state to be tomographed.
:param list|NoneType qubits: A list of Qubits or Numbers, to perform the tomography on. If
`None`, performs it on all in state_prep.
:param generator rotation_generator: A generator that yields tomography rotations to perform.
:return: Program for state tomography.
:rtype: Program
"""
if qubits is None:
qubits = state_prep.get_qubits()
for tomography_program in rotation_generator(*qubits):
state_tomography_program = Program(Pragma("PRESERVE_BLOCK"))
state_tomography_program.inst(state_prep)
state_tomography_program.inst(tomography_program)
state_tomography_program.inst(Pragma("END_PRESERVE_BLOCK"))
yield state_tomography_program | ['def', 'state_tomography_programs', '(', 'state_prep', ',', 'qubits', '=', 'None', ',', 'rotation_generator', '=', 'tomography', '.', 'default_rotations', ')', ':', 'if', 'qubits', 'is', 'None', ':', 'qubits', '=', 'state_prep', '.', 'get_qubits', '(', ')', 'for', 'tomography_program', 'in', 'rotation_generator', '(', '*', 'qubits', ')', ':', 'state_tomography_program', '=', 'Program', '(', 'Pragma', '(', '"PRESERVE_BLOCK"', ')', ')', 'state_tomography_program', '.', 'inst', '(', 'state_prep', ')', 'state_tomography_program', '.', 'inst', '(', 'tomography_program', ')', 'state_tomography_program', '.', 'inst', '(', 'Pragma', '(', '"END_PRESERVE_BLOCK"', ')', ')', 'yield', 'state_tomography_program'] | Yield tomographic sequences that prepare a state with Quil program `state_prep` and then append
tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in
the program should be tomographically rotated.
:param Program state_prep: The program to prepare the state to be tomographed.
:param list|NoneType qubits: A list of Qubits or Numbers, to perform the tomography on. If
`None`, performs it on all in state_prep.
:param generator rotation_generator: A generator that yields tomography rotations to perform.
:return: Program for state tomography.
:rtype: Program | ['Yield', 'tomographic', 'sequences', 'that', 'prepare', 'a', 'state', 'with', 'Quil', 'program', 'state_prep', 'and', 'then', 'append', 'tomographic', 'rotations', 'on', 'the', 'specified', 'qubits', '.', 'If', 'qubits', 'is', 'None', 'it', 'assumes', 'all', 'qubits', 'in', 'the', 'program', 'should', 'be', 'tomographically', 'rotated', '.'] | train | https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/state_tomography.py#L223-L244 |
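A sketch assuming pyquil is installed: prepare a Bell state, then enumerate the tomography programs that would be executed. Each yielded program wraps the preparation plus one rotation between PRESERVE_BLOCK pragmas, as the code above shows.

```python
from pyquil.quil import Program
from pyquil.gates import H, CNOT
from grove.tomography.state_tomography import state_tomography_programs

bell = Program(H(0), CNOT(0, 1))
for prog in state_tomography_programs(bell, qubits=[0, 1]):
    print(prog)  # state prep + one tomographic rotation per program
```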
9,835 | sdispater/orator | orator/migrations/database_migration_repository.py | DatabaseMigrationRepository.log | def log(self, file, batch):
"""
Log that a migration was run.
:type file: str
:type batch: int
"""
record = {"migration": file, "batch": batch}
self.table().insert(**record) | python | def log(self, file, batch):
"""
Log that a migration was run.
:type file: str
:type batch: int
"""
record = {"migration": file, "batch": batch}
self.table().insert(**record) | ['def', 'log', '(', 'self', ',', 'file', ',', 'batch', ')', ':', 'record', '=', '{', '"migration"', ':', 'file', ',', '"batch"', ':', 'batch', '}', 'self', '.', 'table', '(', ')', '.', 'insert', '(', '*', '*', 'record', ')'] | Log that a migration was run.
:type file: str
:type batch: int | ['Log', 'that', 'a', 'migration', 'was', 'run', '.'] | train | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/migrations/database_migration_repository.py#L34-L43 |
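A sketch of recording a migration run. The resolver is an Orator `DatabaseManager`, the direct module import path is read off the repository layout above, and the migrations table is assumed to already exist on this connection.

```python
from orator import DatabaseManager
from orator.migrations.database_migration_repository import (
    DatabaseMigrationRepository,
)

resolver = DatabaseManager({'sqlite': {'driver': 'sqlite', 'database': ':memory:'}})
repo = DatabaseMigrationRepository(resolver, 'migrations')

# Record that this migration file ran as part of batch 2.
repo.log('2019_01_01_000000_create_users_table', batch=2)
```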
9,836 | getsentry/sentry-python | sentry_sdk/utils.py | event_hint_with_exc_info | def event_hint_with_exc_info(exc_info=None):
# type: (ExcInfo) -> Dict[str, Optional[ExcInfo]]
"""Creates a hint with the exc info filled in."""
if exc_info is None:
exc_info = sys.exc_info()
else:
exc_info = exc_info_from_error(exc_info)
if exc_info[0] is None:
exc_info = None
return {"exc_info": exc_info} | python | def event_hint_with_exc_info(exc_info=None):
# type: (ExcInfo) -> Dict[str, Optional[ExcInfo]]
"""Creates a hint with the exc info filled in."""
if exc_info is None:
exc_info = sys.exc_info()
else:
exc_info = exc_info_from_error(exc_info)
if exc_info[0] is None:
exc_info = None
return {"exc_info": exc_info} | ['def', 'event_hint_with_exc_info', '(', 'exc_info', '=', 'None', ')', ':', '# type: (ExcInfo) -> Dict[str, Optional[ExcInfo]]', 'if', 'exc_info', 'is', 'None', ':', 'exc_info', '=', 'sys', '.', 'exc_info', '(', ')', 'else', ':', 'exc_info', '=', 'exc_info_from_error', '(', 'exc_info', ')', 'if', 'exc_info', '[', '0', ']', 'is', 'None', ':', 'exc_info', '=', 'None', 'return', '{', '"exc_info"', ':', 'exc_info', '}'] | Creates a hint with the exc info filled in. | ['Creates', 'a', 'hint', 'with', 'the', 'exc', 'info', 'filled', 'in', '.'] | train | https://github.com/getsentry/sentry-python/blob/a1d77722bdce0b94660ebf50b5c4a4645916d084/sentry_sdk/utils.py#L81-L90 |
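A small sketch: called with no argument inside an `except` block, the helper captures the active exception via `sys.exc_info()`.

```python
from sentry_sdk.utils import event_hint_with_exc_info

try:
    1 / 0
except ZeroDivisionError:
    hint = event_hint_with_exc_info()  # pulls sys.exc_info() itself
    assert hint["exc_info"][0] is ZeroDivisionError
```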
9,837 | OnroerendErfgoed/language-tags | language_tags/Tag.py | Tag.format | def format(self):
"""
Get format according to algorithm defined in RFC 5646 section 2.1.1.
:return: formatted tag string.
"""
tag = self.data['tag']
subtags = tag.split('-')
if len(subtags) == 1:
return subtags[0]
formatted_tag = subtags[0]
private_tag = False
for i, subtag in enumerate(subtags[1:]):
if len(subtags[i]) == 1 or private_tag:
formatted_tag += '-' + subtag
private_tag = True
elif len(subtag) == 2:
formatted_tag += '-' + subtag.upper()
elif len(subtag) == 4:
formatted_tag += '-' + subtag.capitalize()
else:
formatted_tag += '-' + subtag
return formatted_tag | python | def format(self):
"""
Get format according to algorithm defined in RFC 5646 section 2.1.1.
:return: formatted tag string.
"""
tag = self.data['tag']
subtags = tag.split('-')
if len(subtags) == 1:
return subtags[0]
formatted_tag = subtags[0]
private_tag = False
for i, subtag in enumerate(subtags[1:]):
if len(subtags[i]) == 1 or private_tag:
formatted_tag += '-' + subtag
private_tag = True
elif len(subtag) == 2:
formatted_tag += '-' + subtag.upper()
elif len(subtag) == 4:
formatted_tag += '-' + subtag.capitalize()
else:
formatted_tag += '-' + subtag
return formatted_tag | ['def', 'format', '(', 'self', ')', ':', 'tag', '=', 'self', '.', 'data', '[', "'tag'", ']', 'subtags', '=', 'tag', '.', 'split', '(', "'-'", ')', 'if', 'len', '(', 'subtags', ')', '==', '1', ':', 'return', 'subtags', '[', '0', ']', 'formatted_tag', '=', 'subtags', '[', '0', ']', 'private_tag', '=', 'False', 'for', 'i', ',', 'subtag', 'in', 'enumerate', '(', 'subtags', '[', '1', ':', ']', ')', ':', 'if', 'len', '(', 'subtags', '[', 'i', ']', ')', '==', '1', 'or', 'private_tag', ':', 'formatted_tag', '+=', "'-'", '+', 'subtag', 'private_tag', '=', 'True', 'elif', 'len', '(', 'subtag', ')', '==', '2', ':', 'formatted_tag', '+=', "'-'", '+', 'subtag', '.', 'upper', '(', ')', 'elif', 'len', '(', 'subtag', ')', '==', '4', ':', 'formatted_tag', '+=', "'-'", '+', 'subtag', '.', 'capitalize', '(', ')', 'else', ':', 'formatted_tag', '+=', "'-'", '+', 'subtag', 'return', 'formatted_tag'] | Get format according to algorithm defined in RFC 5646 section 2.1.1.
:return: formatted tag string. | ['Get', 'format', 'according', 'to', 'algorithm', 'defined', 'in', 'RFC', '5646', 'section', '2', '.', '1', '.', '1', '.'] | train | https://github.com/OnroerendErfgoed/language-tags/blob/acb91e5458d22617f344e2eefaba9a9865373fdd/language_tags/Tag.py#L118-L146 |
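The capitalisation rules are easiest to see in isolation, so here is a standalone restatement of the loop above; it is illustrative only, since upstream exposes this through the `Tag` class.

```python
def format_tag(tag):
    subtags = tag.split('-')
    out, private = [subtags[0]], False
    for prev, sub in zip(subtags, subtags[1:]):
        if len(prev) == 1 or private:  # after a singleton: private/extension, leave as-is
            private = True
            out.append(sub)
        elif len(sub) == 2:            # region subtag -> upper case
            out.append(sub.upper())
        elif len(sub) == 4:            # script subtag -> capitalised
            out.append(sub.capitalize())
        else:
            out.append(sub)
    return '-'.join(out)

print(format_tag('sl-latn-it-nedis'))  # sl-Latn-IT-nedis
print(format_tag('en-x-shhh'))         # en-x-shhh
```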
9,838 | DarkEnergySurvey/ugali | ugali/utils/config.py | Config._validate | def _validate(self):
""" Enforce some structure to the config file """
# This could be done with a default config
# Check that specific keys exist
sections = odict([
('catalog',['dirname','basename',
'lon_field','lat_field','objid_field',
'mag_1_band', 'mag_1_field', 'mag_err_1_field',
'mag_2_band', 'mag_2_field', 'mag_err_2_field',
]),
('mask',[]),
('coords',['nside_catalog','nside_mask','nside_likelihood',
'nside_pixel','roi_radius','roi_radius_annulus',
'roi_radius_interior','coordsys',
]),
('likelihood',[]),
('output',[]),
('batch',[]),
])
keys = np.array(list(sections.keys()))
found = np.in1d(keys,list(self.keys()))
if not np.all(found):
msg = 'Missing sections: '+str(keys[~found])
raise Exception(msg)
for section,keys in sections.items():
keys = np.array(keys)
found = np.in1d(keys,list(self[section].keys()))
if not np.all(found):
msg = 'Missing keys in %s: '%(section)+str(keys[~found])
raise Exception(msg) | python | def _validate(self):
""" Enforce some structure to the config file """
# This could be done with a default config
# Check that specific keys exist
sections = odict([
('catalog',['dirname','basename',
'lon_field','lat_field','objid_field',
'mag_1_band', 'mag_1_field', 'mag_err_1_field',
'mag_2_band', 'mag_2_field', 'mag_err_2_field',
]),
('mask',[]),
('coords',['nside_catalog','nside_mask','nside_likelihood',
'nside_pixel','roi_radius','roi_radius_annulus',
'roi_radius_interior','coordsys',
]),
('likelihood',[]),
('output',[]),
('batch',[]),
])
keys = np.array(list(sections.keys()))
found = np.in1d(keys,list(self.keys()))
if not np.all(found):
msg = 'Missing sections: '+str(keys[~found])
raise Exception(msg)
for section,keys in sections.items():
keys = np.array(keys)
found = np.in1d(keys,list(self[section].keys()))
if not np.all(found):
msg = 'Missing keys in %s: '%(section)+str(keys[~found])
raise Exception(msg) | ['def', '_validate', '(', 'self', ')', ':', '# This could be done with a default config', '# Check that specific keys exist', 'sections', '=', 'odict', '(', '[', '(', "'catalog'", ',', '[', "'dirname'", ',', "'basename'", ',', "'lon_field'", ',', "'lat_field'", ',', "'objid_field'", ',', "'mag_1_band'", ',', "'mag_1_field'", ',', "'mag_err_1_field'", ',', "'mag_2_band'", ',', "'mag_2_field'", ',', "'mag_err_2_field'", ',', ']', ')', ',', '(', "'mask'", ',', '[', ']', ')', ',', '(', "'coords'", ',', '[', "'nside_catalog'", ',', "'nside_mask'", ',', "'nside_likelihood'", ',', "'nside_pixel'", ',', "'roi_radius'", ',', "'roi_radius_annulus'", ',', "'roi_radius_interior'", ',', "'coordsys'", ',', ']', ')', ',', '(', "'likelihood'", ',', '[', ']', ')', ',', '(', "'output'", ',', '[', ']', ')', ',', '(', "'batch'", ',', '[', ']', ')', ',', ']', ')', 'keys', '=', 'np', '.', 'array', '(', 'list', '(', 'sections', '.', 'keys', '(', ')', ')', ')', 'found', '=', 'np', '.', 'in1d', '(', 'keys', ',', 'list', '(', 'self', '.', 'keys', '(', ')', ')', ')', 'if', 'not', 'np', '.', 'all', '(', 'found', ')', ':', 'msg', '=', "'Missing sections: '", '+', 'str', '(', 'keys', '[', '~', 'found', ']', ')', 'raise', 'Exception', '(', 'msg', ')', 'for', 'section', ',', 'keys', 'in', 'sections', '.', 'items', '(', ')', ':', 'keys', '=', 'np', '.', 'array', '(', 'keys', ')', 'found', '=', 'np', '.', 'in1d', '(', 'keys', ',', 'list', '(', 'self', '[', 'section', ']', '.', 'keys', '(', ')', ')', ')', 'if', 'not', 'np', '.', 'all', '(', 'found', ')', ':', 'msg', '=', "'Missing keys in %s: '", '%', '(', 'section', ')', '+', 'str', '(', 'keys', '[', '~', 'found', ']', ')', 'raise', 'Exception', '(', 'msg', ')'] | Enforce some structure to the config file | ['Enforce', 'some', 'structure', 'to', 'the', 'config', 'file'] | train | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L91-L124 |
9,839 | modin-project/modin | modin/backends/pandas/query_compiler.py | PandasQueryCompiler._process_all_any | def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis = 0 if axis is None else axis
kwargs["axis"] = axis
builder_func = self._build_mapreduce_func(func, **kwargs)
return self._full_reduce(axis, builder_func) | python | def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis = 0 if axis is None else axis
kwargs["axis"] = axis
builder_func = self._build_mapreduce_func(func, **kwargs)
return self._full_reduce(axis, builder_func) | ['def', '_process_all_any', '(', 'self', ',', 'func', ',', '*', '*', 'kwargs', ')', ':', 'axis', '=', 'kwargs', '.', 'get', '(', '"axis"', ',', '0', ')', 'axis', '=', '0', 'if', 'axis', 'is', 'None', 'else', 'axis', 'kwargs', '[', '"axis"', ']', '=', 'axis', 'builder_func', '=', 'self', '.', '_build_mapreduce_func', '(', 'func', ',', '*', '*', 'kwargs', ')', 'return', 'self', '.', '_full_reduce', '(', 'axis', ',', 'builder_func', ')'] | Calculates if any or all the values are true.
Return:
A new QueryCompiler object containing boolean values or boolean. | ['Calculates', 'if', 'any', 'or', 'all', 'the', 'values', 'are', 'true', '.'] | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L998-L1008 |
9,840 | saltstack/salt | salt/modules/ssh.py | rm_known_host | def rm_known_host(user=None, hostname=None, config=None, port=None):
'''
Remove all keys belonging to hostname from a known_hosts file.
CLI Example:
.. code-block:: bash
salt '*' ssh.rm_known_host <user> <hostname>
'''
if not hostname:
return {'status': 'error',
'error': 'hostname argument required'}
full = _get_known_hosts_file(config=config, user=user)
if isinstance(full, dict):
return full
if not os.path.isfile(full):
return {'status': 'error',
'error': 'Known hosts file {0} does not exist'.format(full)}
ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
cmd = ['ssh-keygen', '-R', ssh_hostname, '-f', full]
cmd_result = __salt__['cmd.run'](cmd, python_shell=False)
if not salt.utils.platform.is_windows():
# ssh-keygen creates a new file, thus a chown is required.
if os.geteuid() == 0 and user:
uinfo = __salt__['user.info'](user)
os.chown(full, uinfo['uid'], uinfo['gid'])
return {'status': 'removed', 'comment': cmd_result} | python | def rm_known_host(user=None, hostname=None, config=None, port=None):
'''
Remove all keys belonging to hostname from a known_hosts file.
CLI Example:
.. code-block:: bash
salt '*' ssh.rm_known_host <user> <hostname>
'''
if not hostname:
return {'status': 'error',
'error': 'hostname argument required'}
full = _get_known_hosts_file(config=config, user=user)
if isinstance(full, dict):
return full
if not os.path.isfile(full):
return {'status': 'error',
'error': 'Known hosts file {0} does not exist'.format(full)}
ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
cmd = ['ssh-keygen', '-R', ssh_hostname, '-f', full]
cmd_result = __salt__['cmd.run'](cmd, python_shell=False)
if not salt.utils.platform.is_windows():
# ssh-keygen creates a new file, thus a chown is required.
if os.geteuid() == 0 and user:
uinfo = __salt__['user.info'](user)
os.chown(full, uinfo['uid'], uinfo['gid'])
return {'status': 'removed', 'comment': cmd_result} | ['def', 'rm_known_host', '(', 'user', '=', 'None', ',', 'hostname', '=', 'None', ',', 'config', '=', 'None', ',', 'port', '=', 'None', ')', ':', 'if', 'not', 'hostname', ':', 'return', '{', "'status'", ':', "'error'", ',', "'error'", ':', "'hostname argument required'", '}', 'full', '=', '_get_known_hosts_file', '(', 'config', '=', 'config', ',', 'user', '=', 'user', ')', 'if', 'isinstance', '(', 'full', ',', 'dict', ')', ':', 'return', 'full', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'full', ')', ':', 'return', '{', "'status'", ':', "'error'", ',', "'error'", ':', "'Known hosts file {0} does not exist'", '.', 'format', '(', 'full', ')', '}', 'ssh_hostname', '=', '_hostname_and_port_to_ssh_hostname', '(', 'hostname', ',', 'port', ')', 'cmd', '=', '[', "'ssh-keygen'", ',', "'-R'", ',', 'ssh_hostname', ',', "'-f'", ',', 'full', ']', 'cmd_result', '=', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ',', 'python_shell', '=', 'False', ')', 'if', 'not', 'salt', '.', 'utils', '.', 'platform', '.', 'is_windows', '(', ')', ':', '# ssh-keygen creates a new file, thus a chown is required.', 'if', 'os', '.', 'geteuid', '(', ')', '==', '0', 'and', 'user', ':', 'uinfo', '=', '__salt__', '[', "'user.info'", ']', '(', 'user', ')', 'os', '.', 'chown', '(', 'full', ',', 'uinfo', '[', "'uid'", ']', ',', 'uinfo', '[', "'gid'", ']', ')', 'return', '{', "'status'", ':', "'removed'", ',', "'comment'", ':', 'cmd_result', '}'] | Remove all keys belonging to hostname from a known_hosts file.
CLI Example:
.. code-block:: bash
salt '*' ssh.rm_known_host <user> <hostname> | ['Remove', 'all', 'keys', 'belonging', 'to', 'hostname', 'from', 'a', 'known_hosts', 'file', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ssh.py#L1013-L1044 |
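The heavy lifting above is the ssh-keygen call, which can be reproduced standalone. A minimal sketch, assuming ssh-keygen is on PATH (hostname and path are placeholders); for non-default ports, known_hosts uses the bracketed '[host]:port' form that _hostname_and_port_to_ssh_hostname produces:

import os
import subprocess

def rm_known_host(hostname, known_hosts="~/.ssh/known_hosts"):
    path = os.path.expanduser(known_hosts)
    # 'ssh-keygen -R' strips every key for the host and rewrites the file.
    done = subprocess.run(["ssh-keygen", "-R", hostname, "-f", path],
                          capture_output=True, text=True)
    return {"status": "removed", "comment": done.stdout.strip()}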
9,841 | Metatab/metapack | metapack/doc.py | MetapackDoc._repr_html_ | def _repr_html_(self, **kwargs):
"""Produce HTML for Jupyter Notebook"""
from jinja2 import Template
from markdown import markdown as convert_markdown
extensions = [
'markdown.extensions.extra',
'markdown.extensions.admonition'
]
return convert_markdown(self.markdown, extensions) | python | def _repr_html_(self, **kwargs):
"""Produce HTML for Jupyter Notebook"""
from jinja2 import Template
from markdown import markdown as convert_markdown
extensions = [
'markdown.extensions.extra',
'markdown.extensions.admonition'
]
return convert_markdown(self.markdown, extensions) | ['def', '_repr_html_', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'from', 'jinja2', 'import', 'Template', 'from', 'markdown', 'import', 'markdown', 'as', 'convert_markdown', 'extensions', '=', '[', "'markdown.extensions.extra'", ',', "'markdown.extensions.admonition'", ']', 'return', 'convert_markdown', '(', 'self', '.', 'markdown', ',', 'extensions', ')'] | Produce HTML for Jupyter Notebook | ['Produce', 'HTML', 'for', 'Jupyter', 'Notebook'] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L233-L243 |
9,842 | pmorissette/bt | bt/core.py | StrategyBase.close | def close(self, child):
"""
Close a child position - alias for rebalance(0, child). This will also
flatten (close out all) the child's children.
Args:
* child (str): Child, specified by name.
"""
c = self.children[child]
# flatten if children not None
if c.children is not None and len(c.children) != 0:
c.flatten()
if c.value != 0. and not np.isnan(c.value):
c.allocate(-c.value) | python | def close(self, child):
"""
Close a child position - alias for rebalance(0, child). This will also
flatten (close out all) the child's children.
Args:
* child (str): Child, specified by name.
"""
c = self.children[child]
# flatten if children not None
if c.children is not None and len(c.children) != 0:
c.flatten()
if c.value != 0. and not np.isnan(c.value):
c.allocate(-c.value) | ['def', 'close', '(', 'self', ',', 'child', ')', ':', 'c', '=', 'self', '.', 'children', '[', 'child', ']', '# flatten if children not None', 'if', 'c', '.', 'children', 'is', 'not', 'None', 'and', 'len', '(', 'c', '.', 'children', ')', '!=', '0', ':', 'c', '.', 'flatten', '(', ')', 'if', 'c', '.', 'value', '!=', '0.', 'and', 'not', 'np', '.', 'isnan', '(', 'c', '.', 'value', ')', ':', 'c', '.', 'allocate', '(', '-', 'c', '.', 'value', ')'] | Close a child position - alias for rebalance(0, child). This will also
flatten (close out all) the child's children.
Args:
* child (str): Child, specified by name. | ['Close', 'a', 'child', 'position', '-', 'alias', 'for', 'rebalance', '(', '0', 'child', ')', '.', 'This', 'will', 'also', 'flatten', '(', 'close', 'out', 'all', ')', 'the', 'child', 's', 'children', '.'] | train | https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L724-L738 |
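In practice close() is called on the strategy from inside an Algo during a backtest. A rough usage sketch, assuming bt's Algo protocol (__call__ receives the strategy as target and returns a bool); the exit rule and class name are invented:

import bt

class CloseSmallPositions(bt.Algo):
    def __call__(self, target):
        for name, child in list(target.children.items()):
            if 0 < child.value < 100:   # made-up exit rule
                target.close(name)      # flattens children, then sells out
        return True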
9,843 | rosenbrockc/ci | pyci/server.py | Wiki._edit_main | def _edit_main(self, request):
"""Adds the link to the new unit testing results on the repo's main wiki page.
"""
self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number)
if not self.testmode:
page = site.pages[self.basepage]
text = page.text()
else:
text = "This is a fake wiki page.\n\n<!--@CI:Placeholder-->"
self.newpage = self.prefix
link = "Pull Request #{}".format(request.pull.number)
text = text.replace("<!--@CI:Placeholder-->",
"* [[{}|{}]]\n<!--@CI:Placeholder-->".format(self.newpage, link))
if not self.testmode:
result = page.save(text, summary="Added {} unit test link.".format(link), minor=True, bot=True)
return result[u'result'] == u'Success'
else:
return text | python | def _edit_main(self, request):
"""Adds the link to the new unit testing results on the repo's main wiki page.
"""
self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number)
if not self.testmode:
page = site.pages[self.basepage]
text = page.text()
else:
text = "This is a fake wiki page.\n\n<!--@CI:Placeholder-->"
self.newpage = self.prefix
link = "Pull Request #{}".format(request.pull.number)
text = text.replace("<!--@CI:Placeholder-->",
"* [[{}|{}]]\n<!--@CI:Placeholder-->".format(self.newpage, link))
if not self.testmode:
result = page.save(text, summary="Added {} unit test link.".format(link), minor=True, bot=True)
return result[u'result'] == u'Success'
else:
return text | ['def', '_edit_main', '(', 'self', ',', 'request', ')', ':', 'self', '.', 'prefix', '=', '"{}_Pull_Request_{}"', '.', 'format', '(', 'request', '.', 'repo', '.', 'name', ',', 'request', '.', 'pull', '.', 'number', ')', 'if', 'not', 'self', '.', 'testmode', ':', 'page', '=', 'site', '.', 'pages', '[', 'self', '.', 'basepage', ']', 'text', '=', 'page', '.', 'text', '(', ')', 'else', ':', 'text', '=', '"This is a fake wiki page.\\n\\n<!--@CI:Placeholder-->"', 'self', '.', 'newpage', '=', 'self', '.', 'prefix', 'link', '=', '"Pull Request #{}"', '.', 'format', '(', 'request', '.', 'pull', '.', 'number', ')', 'text', '=', 'text', '.', 'replace', '(', '"<!--@CI:Placeholder-->"', ',', '"* [[{}|{}]]\\n<!--@CI:Placeholder-->"', '.', 'format', '(', 'self', '.', 'newpage', ',', 'link', ')', ')', 'if', 'not', 'self', '.', 'testmode', ':', 'result', '=', 'page', '.', 'save', '(', 'text', ',', 'summary', '=', '"Added {} unit test link."', '.', 'format', '(', 'link', ')', ',', 'minor', '=', 'True', ',', 'bot', '=', 'True', ')', 'return', 'result', '[', "u'result'", ']', '==', "u'Success'", 'else', ':', 'return', 'text'] | Adds the link to the new unit testing results on the repo's main wiki page. | ['Adds', 'the', 'link', 'to', 'the', 'new', 'unit', 'testing', 'results', 'on', 'the', 'repo', 's', 'main', 'wiki', 'page', '.'] | train | https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/server.py#L706-L724 |
9,844 | Esri/ArcREST | src/arcrest/manageportal/administration.py | _Federation.unfederate | def unfederate(self, serverId):
"""
This operation unfederates an ArcGIS Server from Portal for ArcGIS
"""
url = self._url + "/servers/{serverid}/unfederate".format(
serverid=serverId)
params = {"f" : "json"}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url) | python | def unfederate(self, serverId):
"""
This operation unfederates an ArcGIS Server from Portal for ArcGIS
"""
url = self._url + "/servers/{serverid}/unfederate".format(
serverid=serverId)
params = {"f" : "json"}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url) | ['def', 'unfederate', '(', 'self', ',', 'serverId', ')', ':', 'url', '=', 'self', '.', '_url', '+', '"/servers/{serverid}/unfederate"', '.', 'format', '(', 'serverid', '=', 'serverId', ')', 'params', '=', '{', '"f"', ':', '"json"', '}', 'return', 'self', '.', '_get', '(', 'url', '=', 'url', ',', 'param_dict', '=', 'params', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ')'] | This operation unfederates an ArcGIS Server from Portal for ArcGIS | ['This', 'operation', 'unfederates', 'an', 'ArcGIS', 'Server', 'from', 'Portal', 'for', 'ArcGIS'] | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageportal/administration.py#L72-L82 |
9,845 | estnltk/estnltk | estnltk/text.py | Text.sentence_starts | def sentence_starts(self):
"""The list of start positions representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.starts(SENTENCES) | python | def sentence_starts(self):
"""The list of start positions representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.starts(SENTENCES) | ['def', 'sentence_starts', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_tagged', '(', 'SENTENCES', ')', ':', 'self', '.', 'tokenize_sentences', '(', ')', 'return', 'self', '.', 'starts', '(', 'SENTENCES', ')'] | The list of start positions representing ``sentences`` layer elements. | ['The', 'list', 'of', 'start', 'positions', 'representing', 'sentences', 'layer', 'elements', '.'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L504-L508 |
9,846 | ianmiell/shutit | shutit_class.py | ShutIt.stop_all | def stop_all(self, run_order=-1):
"""Runs stop method on all modules less than the passed-in run_order.
Used when target is exporting itself mid-build, so we clean up state
before committing run files etc.
"""
shutit_global.shutit_global_object.yield_to_draw()
# sort them so they're stopped in reverse order
for module_id in self.module_ids(rev=True):
shutit_module_obj = self.shutit_map[module_id]
if run_order == -1 or shutit_module_obj.run_order <= run_order:
if self.is_installed(shutit_module_obj):
if not shutit_module_obj.stop(self):
self.fail('failed to stop: ' + module_id, shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').shutit_pexpect_child) | python | def stop_all(self, run_order=-1):
"""Runs stop method on all modules less than the passed-in run_order.
Used when target is exporting itself mid-build, so we clean up state
before committing run files etc.
"""
shutit_global.shutit_global_object.yield_to_draw()
# sort them so they're stopped in reverse order
for module_id in self.module_ids(rev=True):
shutit_module_obj = self.shutit_map[module_id]
if run_order == -1 or shutit_module_obj.run_order <= run_order:
if self.is_installed(shutit_module_obj):
if not shutit_module_obj.stop(self):
self.fail('failed to stop: ' + module_id, shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').shutit_pexpect_child) | ['def', 'stop_all', '(', 'self', ',', 'run_order', '=', '-', '1', ')', ':', 'shutit_global', '.', 'shutit_global_object', '.', 'yield_to_draw', '(', ')', "# sort them so they're stopped in reverse order", 'for', 'module_id', 'in', 'self', '.', 'module_ids', '(', 'rev', '=', 'True', ')', ':', 'shutit_module_obj', '=', 'self', '.', 'shutit_map', '[', 'module_id', ']', 'if', 'run_order', '==', '-', '1', 'or', 'shutit_module_obj', '.', 'run_order', '<=', 'run_order', ':', 'if', 'self', '.', 'is_installed', '(', 'shutit_module_obj', ')', ':', 'if', 'not', 'shutit_module_obj', '.', 'stop', '(', 'self', ')', ':', 'self', '.', 'fail', '(', "'failed to stop: '", '+', 'module_id', ',', 'shutit_pexpect_child', '=', 'self', '.', 'get_shutit_pexpect_session_from_id', '(', "'target_child'", ')', '.', 'shutit_pexpect_child', ')'] | Runs stop method on all modules less than the passed-in run_order.
Used when target is exporting itself mid-build, so we clean up state
before committing run files etc. | ['Runs', 'stop', 'method', 'on', 'all', 'modules', 'less', 'than', 'the', 'passed', '-', 'in', 'run_order', '.', 'Used', 'when', 'target', 'is', 'exporting', 'itself', 'mid', '-', 'build', 'so', 'we', 'clean', 'up', 'state', 'before', 'committing', 'run', 'files', 'etc', '.'] | train | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L4615-L4627 |
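The essential behaviour is teardown in reverse run order, stopping only modules at or below the given threshold. A stripped-down sketch with plain dicts standing in for modules:

def stop_all(modules, run_order=-1):
    for mod in sorted(modules, key=lambda m: m["run_order"], reverse=True):
        if run_order == -1 or mod["run_order"] <= run_order:
            if mod["installed"] and not mod["stop"]():
                raise RuntimeError("failed to stop: " + mod["id"])

mods = [{"id": "db", "run_order": 1, "installed": True, "stop": lambda: True},
        {"id": "web", "run_order": 2, "installed": True, "stop": lambda: True}]
stop_all(mods)  # stops 'web' before 'db'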
9,847 | mikedh/trimesh | trimesh/util.py | append_faces | def append_faces(vertices_seq, faces_seq):
"""
Given a sequence of zero- indexed faces and vertices
combine them into a single array of faces and
a single array of vertices.
Parameters
-----------
vertices_seq : (n, ) sequence of (m, d) float
Multiple arrays of vertices
faces_seq : (n, ) sequence of (p, j) int
Zero indexed faces for matching vertices
Returns
----------
vertices : (i, d) float
Points in space
faces : (j, 3) int
Reference vertex indices
"""
# the length of each vertex array
vertices_len = np.array([len(i) for i in vertices_seq])
# how much each group of faces needs to be offset
face_offset = np.append(0, np.cumsum(vertices_len)[:-1])
new_faces = []
for offset, faces in zip(face_offset, faces_seq):
if len(faces) == 0:
continue
# apply the index offset
new_faces.append(faces + offset)
# stack to clean (n, 3) float
vertices = vstack_empty(vertices_seq)
# stack to clean (n, 3) int
faces = vstack_empty(new_faces)
return vertices, faces | python | def append_faces(vertices_seq, faces_seq):
"""
Given a sequence of zero- indexed faces and vertices
combine them into a single array of faces and
a single array of vertices.
Parameters
-----------
vertices_seq : (n, ) sequence of (m, d) float
Multiple arrays of vertices
faces_seq : (n, ) sequence of (p, j) int
Zero indexed faces for matching vertices
Returns
----------
vertices : (i, d) float
Points in space
faces : (j, 3) int
Reference vertex indices
"""
# the length of each vertex array
vertices_len = np.array([len(i) for i in vertices_seq])
# how much each group of faces needs to be offset
face_offset = np.append(0, np.cumsum(vertices_len)[:-1])
new_faces = []
for offset, faces in zip(face_offset, faces_seq):
if len(faces) == 0:
continue
# apply the index offset
new_faces.append(faces + offset)
# stack to clean (n, 3) float
vertices = vstack_empty(vertices_seq)
# stack to clean (n, 3) int
faces = vstack_empty(new_faces)
return vertices, faces | ['def', 'append_faces', '(', 'vertices_seq', ',', 'faces_seq', ')', ':', '# the length of each vertex array', 'vertices_len', '=', 'np', '.', 'array', '(', '[', 'len', '(', 'i', ')', 'for', 'i', 'in', 'vertices_seq', ']', ')', '# how much each group of faces needs to be offset', 'face_offset', '=', 'np', '.', 'append', '(', '0', ',', 'np', '.', 'cumsum', '(', 'vertices_len', ')', '[', ':', '-', '1', ']', ')', 'new_faces', '=', '[', ']', 'for', 'offset', ',', 'faces', 'in', 'zip', '(', 'face_offset', ',', 'faces_seq', ')', ':', 'if', 'len', '(', 'faces', ')', '==', '0', ':', 'continue', '# apply the index offset', 'new_faces', '.', 'append', '(', 'faces', '+', 'offset', ')', '# stack to clean (n, 3) float', 'vertices', '=', 'vstack_empty', '(', 'vertices_seq', ')', '# stack to clean (n, 3) int', 'faces', '=', 'vstack_empty', '(', 'new_faces', ')', 'return', 'vertices', ',', 'faces'] | Given a sequence of zero- indexed faces and vertices
combine them into a single array of faces and
a single array of vertices.
Parameters
-----------
vertices_seq : (n, ) sequence of (m, d) float
Multiple arrays of vertices
faces_seq : (n, ) sequence of (p, j) int
Zero indexed faces for matching vertices
Returns
----------
vertices : (i, d) float
Points in space
faces : (j, 3) int
Reference vertex indices | ['Given', 'a', 'sequence', 'of', 'zero', '-', 'indexed', 'faces', 'and', 'vertices', 'combine', 'them', 'into', 'a', 'single', 'array', 'of', 'faces', 'and', 'a', 'single', 'array', 'of', 'vertices', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L948-L984 |
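Since append_faces is a public helper in trimesh.util, a short concrete example shows the index offsetting: the second face's indices are shifted by the length of the first vertex array.

import numpy as np
from trimesh.util import append_faces

v1 = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float)
v2 = v1 + [0.0, 0.0, 1.0]        # same triangle, lifted in z
f1 = f2 = np.array([[0, 1, 2]])  # each indexed from zero locally

vertices, faces = append_faces([v1, v2], [f1, f2])
print(faces)   # [[0 1 2] [3 4 5]] -- second face offset by len(v1)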
9,848 | jonathf/chaospy | chaospy/poly/shaping.py | transpose | def transpose(vari):
"""
Transpose a shapeable quantity.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Quantity of interest.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Same type as ``vari``.
Examples:
>>> P = chaospy.reshape(chaospy.prange(4), (2,2))
>>> print(P)
[[1, q0], [q0^2, q0^3]]
>>> print(chaospy.transpose(P))
[[1, q0^2], [q0, q0^3]]
"""
if isinstance(vari, Poly):
core = vari.A.copy()
for key in vari.keys:
core[key] = transpose(core[key])
return Poly(core, vari.dim, vari.shape[::-1], vari.dtype)
return numpy.transpose(vari) | python | def transpose(vari):
"""
Transpose a shapeable quantity.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Quantity of interest.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Same type as ``vari``.
Examples:
>>> P = chaospy.reshape(chaospy.prange(4), (2,2))
>>> print(P)
[[1, q0], [q0^2, q0^3]]
>>> print(chaospy.transpose(P))
[[1, q0^2], [q0, q0^3]]
"""
if isinstance(vari, Poly):
core = vari.A.copy()
for key in vari.keys:
core[key] = transpose(core[key])
return Poly(core, vari.dim, vari.shape[::-1], vari.dtype)
return numpy.transpose(vari) | ['def', 'transpose', '(', 'vari', ')', ':', 'if', 'isinstance', '(', 'vari', ',', 'Poly', ')', ':', 'core', '=', 'vari', '.', 'A', '.', 'copy', '(', ')', 'for', 'key', 'in', 'vari', '.', 'keys', ':', 'core', '[', 'key', ']', '=', 'transpose', '(', 'core', '[', 'key', ']', ')', 'return', 'Poly', '(', 'core', ',', 'vari', '.', 'dim', ',', 'vari', '.', 'shape', '[', ':', ':', '-', '1', ']', ',', 'vari', '.', 'dtype', ')', 'return', 'numpy', '.', 'transpose', '(', 'vari', ')'] | Transpose a shapeable quantity.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Quantity of interest.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Same type as ``vari``.
Examples:
>>> P = chaospy.reshape(chaospy.prange(4), (2,2))
>>> print(P)
[[1, q0], [q0^2, q0^3]]
>>> print(chaospy.transpose(P))
[[1, q0^2], [q0, q0^3]] | ['Transpose', 'a', 'shapeable', 'quantity', '.'] | train | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/poly/shaping.py#L114-L139 |
9,849 | nitely/django-hooks | hooks/templatehook.py | Hook.unregister | def unregister(self, name, func):
"""
Remove a previously registered callback
:param str name: Hook name
:param callable func: A function reference\
that was registered previously
"""
try:
templatehook = self._registry[name]
except KeyError:
return
templatehook.unregister(func) | python | def unregister(self, name, func):
"""
Remove a previously registered callback
:param str name: Hook name
:param callable func: A function reference\
that was registered previously
"""
try:
templatehook = self._registry[name]
except KeyError:
return
templatehook.unregister(func) | ['def', 'unregister', '(', 'self', ',', 'name', ',', 'func', ')', ':', 'try', ':', 'templatehook', '=', 'self', '.', '_registry', '[', 'name', ']', 'except', 'KeyError', ':', 'return', 'templatehook', '.', 'unregister', '(', 'func', ')'] | Remove a previously registered callback
:param str name: Hook name
:param callable func: A function reference\
that was registered previously | ['Remove', 'a', 'previously', 'registered', 'callback'] | train | https://github.com/nitely/django-hooks/blob/26ea2150c9be110e90b9ee60fbfd1065ac30ab1d/hooks/templatehook.py#L117-L130 |
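The fail-silent unregister (swallowing the lookup error for unknown hook names) can be reproduced in a minimal registry, independent of django-hooks:

from collections import defaultdict

class Registry:
    def __init__(self):
        self._registry = defaultdict(list)   # hook name -> callbacks

    def register(self, name, func):
        self._registry[name].append(func)

    def unregister(self, name, func):
        try:
            self._registry[name].remove(func)
        except ValueError:   # func was never registered: stay silent
            return

reg = Registry()
cb = lambda: "footer html"
reg.register("footer", cb)
reg.unregister("footer", cb)    # removed
reg.unregister("footer", cb)    # second call is a harmless no-op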
9,850 | slightlynybbled/tk_tools | tk_tools/canvas.py | Led.to_yellow | def to_yellow(self, on: bool=False):
"""
Change the LED to yellow (on or off)
:param on: True or False
:return: None
"""
self._on = on
if on:
self._load_new(led_yellow_on)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(False))
else:
self._load_new(led_yellow)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(True)) | python | def to_yellow(self, on: bool=False):
"""
Change the LED to yellow (on or off)
:param on: True or False
:return: None
"""
self._on = on
if on:
self._load_new(led_yellow_on)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(False))
else:
self._load_new(led_yellow)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(True)) | ['def', 'to_yellow', '(', 'self', ',', 'on', ':', 'bool', '=', 'False', ')', ':', 'self', '.', '_on', '=', 'on', 'if', 'on', ':', 'self', '.', '_load_new', '(', 'led_yellow_on', ')', 'if', 'self', '.', '_toggle_on_click', ':', 'self', '.', '_canvas', '.', 'bind', '(', "'<Button-1>'", ',', 'lambda', 'x', ':', 'self', '.', 'to_yellow', '(', 'False', ')', ')', 'else', ':', 'self', '.', '_load_new', '(', 'led_yellow', ')', 'if', 'self', '.', '_toggle_on_click', ':', 'self', '.', '_canvas', '.', 'bind', '(', "'<Button-1>'", ',', 'lambda', 'x', ':', 'self', '.', 'to_yellow', '(', 'True', ')', ')'] | Change the LED to yellow (on or off)
:param on: True or False
:return: None | ['Change', 'the', 'LED', 'to', 'yellow', '(', 'on', 'or', 'off', ')', ':', 'param', 'on', ':', 'True', 'or', 'False', ':', 'return', ':', 'None'] | train | https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L597-L615 |
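A rough usage sketch, assuming a display is available and that Led accepts a parent widget and a size, as in tk_tools' other widgets; the size value is arbitrary:

import tkinter as tk
import tk_tools

root = tk.Tk()
led = tk_tools.Led(root, size=50)
led.grid()
led.to_yellow(on=True)   # lit; a click toggles it off via the binding
root.mainloop()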
9,851 | aws/aws-encryption-sdk-python | examples/src/data_key_caching_basic.py | encrypt_with_caching | def encrypt_with_caching(kms_cmk_arn, max_age_in_cache, cache_capacity):
"""Encrypts a string using an AWS KMS customer master key (CMK) and data key caching.
:param str kms_cmk_arn: Amazon Resource Name (ARN) of the KMS customer master key
:param float max_age_in_cache: Maximum time in seconds that a cached entry can be used
:param int cache_capacity: Maximum number of entries to retain in cache at once
"""
# Data to be encrypted
my_data = "My plaintext data"
# Security thresholds
# Max messages (or max bytes per) data key are optional
MAX_ENTRY_MESSAGES = 100
# Create an encryption context
encryption_context = {"purpose": "test"}
# Create a master key provider for the KMS customer master key (CMK)
key_provider = aws_encryption_sdk.KMSMasterKeyProvider(key_ids=[kms_cmk_arn])
# Create a local cache
cache = aws_encryption_sdk.LocalCryptoMaterialsCache(cache_capacity)
# Create a caching CMM
caching_cmm = aws_encryption_sdk.CachingCryptoMaterialsManager(
master_key_provider=key_provider,
cache=cache,
max_age=max_age_in_cache,
max_messages_encrypted=MAX_ENTRY_MESSAGES,
)
# When the call to encrypt data specifies a caching CMM,
# the encryption operation uses the data key cache specified
# in the caching CMM
encrypted_message, _header = aws_encryption_sdk.encrypt(
source=my_data, materials_manager=caching_cmm, encryption_context=encryption_context
)
return encrypted_message | python | def encrypt_with_caching(kms_cmk_arn, max_age_in_cache, cache_capacity):
"""Encrypts a string using an AWS KMS customer master key (CMK) and data key caching.
:param str kms_cmk_arn: Amazon Resource Name (ARN) of the KMS customer master key
:param float max_age_in_cache: Maximum time in seconds that a cached entry can be used
:param int cache_capacity: Maximum number of entries to retain in cache at once
"""
# Data to be encrypted
my_data = "My plaintext data"
# Security thresholds
# Max messages (or max bytes per) data key are optional
MAX_ENTRY_MESSAGES = 100
# Create an encryption context
encryption_context = {"purpose": "test"}
# Create a master key provider for the KMS customer master key (CMK)
key_provider = aws_encryption_sdk.KMSMasterKeyProvider(key_ids=[kms_cmk_arn])
# Create a local cache
cache = aws_encryption_sdk.LocalCryptoMaterialsCache(cache_capacity)
# Create a caching CMM
caching_cmm = aws_encryption_sdk.CachingCryptoMaterialsManager(
master_key_provider=key_provider,
cache=cache,
max_age=max_age_in_cache,
max_messages_encrypted=MAX_ENTRY_MESSAGES,
)
# When the call to encrypt data specifies a caching CMM,
# the encryption operation uses the data key cache specified
# in the caching CMM
encrypted_message, _header = aws_encryption_sdk.encrypt(
source=my_data, materials_manager=caching_cmm, encryption_context=encryption_context
)
return encrypted_message | ['def', 'encrypt_with_caching', '(', 'kms_cmk_arn', ',', 'max_age_in_cache', ',', 'cache_capacity', ')', ':', '# Data to be encrypted', 'my_data', '=', '"My plaintext data"', '# Security thresholds', '# Max messages (or max bytes per) data key are optional', 'MAX_ENTRY_MESSAGES', '=', '100', '# Create an encryption context', 'encryption_context', '=', '{', '"purpose"', ':', '"test"', '}', '# Create a master key provider for the KMS customer master key (CMK)', 'key_provider', '=', 'aws_encryption_sdk', '.', 'KMSMasterKeyProvider', '(', 'key_ids', '=', '[', 'kms_cmk_arn', ']', ')', '# Create a local cache', 'cache', '=', 'aws_encryption_sdk', '.', 'LocalCryptoMaterialsCache', '(', 'cache_capacity', ')', '# Create a caching CMM', 'caching_cmm', '=', 'aws_encryption_sdk', '.', 'CachingCryptoMaterialsManager', '(', 'master_key_provider', '=', 'key_provider', ',', 'cache', '=', 'cache', ',', 'max_age', '=', 'max_age_in_cache', ',', 'max_messages_encrypted', '=', 'MAX_ENTRY_MESSAGES', ',', ')', '# When the call to encrypt data specifies a caching CMM,', '# the encryption operation uses the data key cache specified', '# in the caching CMM', 'encrypted_message', ',', '_header', '=', 'aws_encryption_sdk', '.', 'encrypt', '(', 'source', '=', 'my_data', ',', 'materials_manager', '=', 'caching_cmm', ',', 'encryption_context', '=', 'encryption_context', ')', 'return', 'encrypted_message'] | Encrypts a string using an AWS KMS customer master key (CMK) and data key caching.
:param str kms_cmk_arn: Amazon Resource Name (ARN) of the KMS customer master key
:param float max_age_in_cache: Maximum time in seconds that a cached entry can be used
:param int cache_capacity: Maximum number of entries to retain in cache at once | ['Encrypts', 'a', 'string', 'using', 'an', 'AWS', 'KMS', 'customer', 'master', 'key', '(', 'CMK', ')', 'and', 'data', 'key', 'caching', '.'] | train | https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/examples/src/data_key_caching_basic.py#L17-L55 |
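The matching decrypt call needs only the bare master key provider, since caching pays off on the encrypt side of this example. A sketch against the same v1-style module-level API the record uses:

import aws_encryption_sdk

def decrypt_message(ciphertext, kms_cmk_arn):
    key_provider = aws_encryption_sdk.KMSMasterKeyProvider(key_ids=[kms_cmk_arn])
    plaintext, header = aws_encryption_sdk.decrypt(source=ciphertext,
                                                   key_provider=key_provider)
    # The encryption context round-trips in the message header.
    assert header.encryption_context["purpose"] == "test"
    return plaintext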
9,852 | Galarzaa90/tibia.py | tibiapy/utils.py | parse_tibiacom_content | def parse_tibiacom_content(content, *, html_class="BoxContent", tag="div", builder="lxml"):
"""Parses HTML content from Tibia.com into a BeautifulSoup object.
Parameters
----------
content: :class:`str`
The raw HTML content from Tibia.com
html_class: :class:`str`
The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
The HTML tag to select. The default value is ``div``.
builder: :class:`str`
The builder to use. The default value is ``lxml``.
Returns
-------
:class:`bs4.BeautifulSoup`, optional
The parsed content.
"""
return bs4.BeautifulSoup(content.replace('ISO-8859-1', 'utf-8'), builder,
parse_only=bs4.SoupStrainer(tag, class_=html_class)) | python | def parse_tibiacom_content(content, *, html_class="BoxContent", tag="div", builder="lxml"):
"""Parses HTML content from Tibia.com into a BeautifulSoup object.
Parameters
----------
content: :class:`str`
The raw HTML content from Tibia.com
html_class: :class:`str`
The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
The HTML tag to select. The default value is ``div``.
builder: :class:`str`
The builder to use. The default value is ``lxml``.
Returns
-------
:class:`bs4.BeautifulSoup`, optional
The parsed content.
"""
return bs4.BeautifulSoup(content.replace('ISO-8859-1', 'utf-8'), builder,
parse_only=bs4.SoupStrainer(tag, class_=html_class)) | ['def', 'parse_tibiacom_content', '(', 'content', ',', '*', ',', 'html_class', '=', '"BoxContent"', ',', 'tag', '=', '"div"', ',', 'builder', '=', '"lxml"', ')', ':', 'return', 'bs4', '.', 'BeautifulSoup', '(', 'content', '.', 'replace', '(', "'ISO-8859-1'", ',', "'utf-8'", ')', ',', 'builder', ',', 'parse_only', '=', 'bs4', '.', 'SoupStrainer', '(', 'tag', ',', 'class_', '=', 'html_class', ')', ')'] | Parses HTML content from Tibia.com into a BeautifulSoup object.
Parameters
----------
content: :class:`str`
The raw HTML content from Tibia.com
html_class: :class:`str`
The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
The HTML tag to select. The default value is ``div``.
builder: :class:`str`
The builder to use. The default value is ``lxml``.
Returns
-------
:class:`bs4.BeautifulSoup`, optional
The parsed content. | ['Parses', 'HTML', 'content', 'from', 'Tibia', '.', 'com', 'into', 'a', 'BeautifulSoup', 'object', '.'] | train | https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/utils.py#L242-L262 |
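The parse_only strainer is the interesting part: BeautifulSoup never even builds elements outside the matched tag. A tiny demonstration with made-up HTML:

import bs4

html = '<div class="Header">skip</div><div class="BoxContent"><b>keep</b></div>'
soup = bs4.BeautifulSoup(html, "lxml",
                         parse_only=bs4.SoupStrainer("div", class_="BoxContent"))
print(soup.find("b").text)   # 'keep' -- the Header div was never parsed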
9,853 | xapple/plumbing | plumbing/databases/sqlite_database.py | SQLiteDatabase.get_and_order | def get_and_order(self, ids, column=None, table=None):
"""Get specific entries and order them in the same way."""
command = """
SELECT rowid, * from "data"
WHERE rowid in (%s)
ORDER BY CASE rowid
%s
END;
"""
ordered = ','.join(map(str,ids))
rowids = '\n'.join("WHEN '%s' THEN %s" % (row,i) for i,row in enumerate(ids))
command = command % (ordered, rowids) | python | def get_and_order(self, ids, column=None, table=None):
"""Get specific entries and order them in the same way."""
command = """
SELECT rowid, * from "data"
WHERE rowid in (%s)
ORDER BY CASE rowid
%s
END;
"""
ordered = ','.join(map(str,ids))
rowids = '\n'.join("WHEN '%s' THEN %s" % (row,i) for i,row in enumerate(ids))
command = command % (ordered, rowids) | ['def', 'get_and_order', '(', 'self', ',', 'ids', ',', 'column', '=', 'None', ',', 'table', '=', 'None', ')', ':', 'command', '=', '"""\n SELECT rowid, * from "data"\n WHERE rowid in (%s)\n ORDER BY CASE rowid\n %s\n END;\n """', 'ordered', '=', "','", '.', 'join', '(', 'map', '(', 'str', ',', 'ids', ')', ')', 'rowids', '=', "'\\n'", '.', 'join', '(', '"WHEN \'%s\' THEN %s"', '%', '(', 'row', ',', 'i', ')', 'for', 'i', ',', 'row', 'in', 'enumerate', '(', 'ids', ')', ')', 'command', '=', 'command', '%', '(', 'ordered', ',', 'rowids', ')'] | Get specific entries and order them in the same way. | ['Get', 'specific', 'entries', 'and', 'order', 'them', 'in', 'the', 'same', 'way', '.'] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/databases/sqlite_database.py#L334-L345 |
9,854 | bcbio/bcbio-nextgen | bcbio/variation/genotype.py | batch_for_variantcall | def batch_for_variantcall(samples):
"""Prepare a set of samples for parallel variant calling.
CWL input target that groups samples into batches and variant callers
for parallel processing.
If doing joint calling, with `tools_on: [gvcf]`, split the sample into
individuals instead of combining into a batch.
"""
sample_order = [dd.get_sample_name(utils.to_single_data(x)) for x in samples]
to_process, extras = _dup_samples_by_variantcaller(samples, require_bam=False)
batch_groups = collections.defaultdict(list)
to_process = [utils.to_single_data(x) for x in to_process]
for data in cwlutils.samples_to_records(to_process):
vc = get_variantcaller(data, require_bam=False)
batches = dd.get_batches(data) or dd.get_sample_name(data)
if not isinstance(batches, (list, tuple)):
batches = [batches]
for b in batches:
batch_groups[(b, vc)].append(utils.deepish_copy(data))
batches = []
for cur_group in batch_groups.values():
joint_calling = any([is_joint(d) for d in cur_group])
if joint_calling:
for d in cur_group:
batches.append([d])
else:
batches.append(cur_group)
def by_original_order(xs):
return (min([sample_order.index(dd.get_sample_name(x)) for x in xs]),
min([dd.get_variantcaller_order(x) for x in xs]))
return sorted(batches + extras, key=by_original_order) | python | def batch_for_variantcall(samples):
"""Prepare a set of samples for parallel variant calling.
CWL input target that groups samples into batches and variant callers
for parallel processing.
If doing joint calling, with `tools_on: [gvcf]`, split the sample into
individuals instead of combining into a batch.
"""
sample_order = [dd.get_sample_name(utils.to_single_data(x)) for x in samples]
to_process, extras = _dup_samples_by_variantcaller(samples, require_bam=False)
batch_groups = collections.defaultdict(list)
to_process = [utils.to_single_data(x) for x in to_process]
for data in cwlutils.samples_to_records(to_process):
vc = get_variantcaller(data, require_bam=False)
batches = dd.get_batches(data) or dd.get_sample_name(data)
if not isinstance(batches, (list, tuple)):
batches = [batches]
for b in batches:
batch_groups[(b, vc)].append(utils.deepish_copy(data))
batches = []
for cur_group in batch_groups.values():
joint_calling = any([is_joint(d) for d in cur_group])
if joint_calling:
for d in cur_group:
batches.append([d])
else:
batches.append(cur_group)
def by_original_order(xs):
return (min([sample_order.index(dd.get_sample_name(x)) for x in xs]),
min([dd.get_variantcaller_order(x) for x in xs]))
return sorted(batches + extras, key=by_original_order) | ['def', 'batch_for_variantcall', '(', 'samples', ')', ':', 'sample_order', '=', '[', 'dd', '.', 'get_sample_name', '(', 'utils', '.', 'to_single_data', '(', 'x', ')', ')', 'for', 'x', 'in', 'samples', ']', 'to_process', ',', 'extras', '=', '_dup_samples_by_variantcaller', '(', 'samples', ',', 'require_bam', '=', 'False', ')', 'batch_groups', '=', 'collections', '.', 'defaultdict', '(', 'list', ')', 'to_process', '=', '[', 'utils', '.', 'to_single_data', '(', 'x', ')', 'for', 'x', 'in', 'to_process', ']', 'for', 'data', 'in', 'cwlutils', '.', 'samples_to_records', '(', 'to_process', ')', ':', 'vc', '=', 'get_variantcaller', '(', 'data', ',', 'require_bam', '=', 'False', ')', 'batches', '=', 'dd', '.', 'get_batches', '(', 'data', ')', 'or', 'dd', '.', 'get_sample_name', '(', 'data', ')', 'if', 'not', 'isinstance', '(', 'batches', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'batches', '=', '[', 'batches', ']', 'for', 'b', 'in', 'batches', ':', 'batch_groups', '[', '(', 'b', ',', 'vc', ')', ']', '.', 'append', '(', 'utils', '.', 'deepish_copy', '(', 'data', ')', ')', 'batches', '=', '[', ']', 'for', 'cur_group', 'in', 'batch_groups', '.', 'values', '(', ')', ':', 'joint_calling', '=', 'any', '(', '[', 'is_joint', '(', 'd', ')', 'for', 'd', 'in', 'cur_group', ']', ')', 'if', 'joint_calling', ':', 'for', 'd', 'in', 'cur_group', ':', 'batches', '.', 'append', '(', '[', 'd', ']', ')', 'else', ':', 'batches', '.', 'append', '(', 'cur_group', ')', 'def', 'by_original_order', '(', 'xs', ')', ':', 'return', '(', 'min', '(', '[', 'sample_order', '.', 'index', '(', 'dd', '.', 'get_sample_name', '(', 'x', ')', ')', 'for', 'x', 'in', 'xs', ']', ')', ',', 'min', '(', '[', 'dd', '.', 'get_variantcaller_order', '(', 'x', ')', 'for', 'x', 'in', 'xs', ']', ')', ')', 'return', 'sorted', '(', 'batches', '+', 'extras', ',', 'key', '=', 'by_original_order', ')'] | Prepare a set of samples for parallel variant calling.
CWL input target that groups samples into batches and variant callers
for parallel processing.
If doing joint calling, with `tools_on: [gvcf]`, split the sample into
individuals instead of combining into a batch. | ['Prepare', 'a', 'set', 'of', 'samples', 'for', 'parallel', 'variant', 'calling', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L241-L272 |
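Stripped of the pipeline plumbing, the core is grouping by (batch, caller) and splitting joint-called groups back into singletons. A toy sketch with plain dicts; the keys are invented stand-ins for the bcbio record fields:

import collections

def group_for_calling(samples):
    groups = collections.defaultdict(list)
    for data in samples:
        for b in data.get("batches") or [data["name"]]:
            groups[(b, data["caller"])].append(data)
    batches = []
    for group in groups.values():
        if any(d.get("joint") for d in group):
            batches.extend([d] for d in group)   # joint calling: one per batch
        else:
            batches.append(group)
    return batches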
9,855 | xflr6/concepts | concepts/tools.py | max_len | def max_len(iterable, minimum=0):
"""Return the len() of the longest item in ``iterable`` or ``minimum``.
>>> max_len(['spam', 'ham'])
4
>>> max_len([])
0
>>> max_len(['ham'], 4)
4
"""
try:
result = max(map(len, iterable))
except ValueError:
result = minimum
return minimum if result < minimum else result | python | def max_len(iterable, minimum=0):
"""Return the len() of the longest item in ``iterable`` or ``minimum``.
>>> max_len(['spam', 'ham'])
4
>>> max_len([])
0
>>> max_len(['ham'], 4)
4
"""
try:
result = max(map(len, iterable))
except ValueError:
result = minimum
return minimum if result < minimum else result | ['def', 'max_len', '(', 'iterable', ',', 'minimum', '=', '0', ')', ':', 'try', ':', 'result', '=', 'max', '(', 'map', '(', 'len', ',', 'iterable', ')', ')', 'except', 'ValueError', ':', 'result', '=', 'minimum', 'return', 'minimum', 'if', 'result', '<', 'minimum', 'else', 'result'] | Return the len() of the longest item in ``iterable`` or ``minimum``.
>>> max_len(['spam', 'ham'])
4
>>> max_len([])
0
>>> max_len(['ham'], 4)
4 | ['Return', 'the', 'len', '()', 'of', 'the', 'longest', 'item', 'in', 'iterable', 'or', 'minimum', '.'] | train | https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/tools.py#L123-L139 |
9,856 | tensorflow/tensorboard | tensorboard/plugins/debugger/tensor_store.py | TensorStore.add | def add(self, watch_key, tensor_value):
"""Add a tensor value.
Args:
watch_key: A string representing the debugger tensor watch, e.g.,
'Dense_1/BiasAdd:0:DebugIdentity'.
tensor_value: The value of the tensor as a numpy.ndarray.
"""
if watch_key not in self._tensor_data:
self._tensor_data[watch_key] = _WatchStore(
watch_key,
mem_bytes_limit=self._watch_mem_bytes_limit)
self._tensor_data[watch_key].add(tensor_value) | python | def add(self, watch_key, tensor_value):
"""Add a tensor value.
Args:
watch_key: A string representing the debugger tensor watch, e.g.,
'Dense_1/BiasAdd:0:DebugIdentity'.
tensor_value: The value of the tensor as a numpy.ndarray.
"""
if watch_key not in self._tensor_data:
self._tensor_data[watch_key] = _WatchStore(
watch_key,
mem_bytes_limit=self._watch_mem_bytes_limit)
self._tensor_data[watch_key].add(tensor_value) | ['def', 'add', '(', 'self', ',', 'watch_key', ',', 'tensor_value', ')', ':', 'if', 'watch_key', 'not', 'in', 'self', '.', '_tensor_data', ':', 'self', '.', '_tensor_data', '[', 'watch_key', ']', '=', '_WatchStore', '(', 'watch_key', ',', 'mem_bytes_limit', '=', 'self', '.', '_watch_mem_bytes_limit', ')', 'self', '.', '_tensor_data', '[', 'watch_key', ']', '.', 'add', '(', 'tensor_value', ')'] | Add a tensor value.
Args:
watch_key: A string representing the debugger tensor watch, e.g.,
'Dense_1/BiasAdd:0:DebugIdentity'.
tensor_value: The value of the tensor as a numpy.ndarray. | ['Add', 'a', 'tensor', 'value', '.'] | train | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L186-L198 |
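The create-on-first-write keying compresses to a few lines; a sketch of the same shape without the memory-limit bookkeeping of _WatchStore:

class TensorStore:
    def __init__(self):
        self._tensor_data = {}

    def add(self, watch_key, tensor_value):
        # One store per watch key, created lazily on first use.
        self._tensor_data.setdefault(watch_key, []).append(tensor_value)

store = TensorStore()
store.add("Dense_1/BiasAdd:0:DebugIdentity", [[0.5, 1.5]])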
9,857 | peterbe/premailer | premailer/premailer.py | Premailer._process_css_text | def _process_css_text(self, css_text, index, rules, head):
"""processes the given css_text by adding rules that can be
in-lined to the given rules list and adding any that cannot
be in-lined to the given `<head>` element.
"""
these_rules, these_leftover = self._parse_style_rules(css_text, index)
rules.extend(these_rules)
if head is not None and (these_leftover or self.keep_style_tags):
style = etree.Element("style")
style.attrib["type"] = "text/css"
if self.keep_style_tags:
style.text = css_text
else:
style.text = self._css_rules_to_string(these_leftover)
head.append(style) | python | def _process_css_text(self, css_text, index, rules, head):
"""processes the given css_text by adding rules that can be
in-lined to the given rules list and adding any that cannot
be in-lined to the given `<head>` element.
"""
these_rules, these_leftover = self._parse_style_rules(css_text, index)
rules.extend(these_rules)
if head is not None and (these_leftover or self.keep_style_tags):
style = etree.Element("style")
style.attrib["type"] = "text/css"
if self.keep_style_tags:
style.text = css_text
else:
style.text = self._css_rules_to_string(these_leftover)
head.append(style) | ['def', '_process_css_text', '(', 'self', ',', 'css_text', ',', 'index', ',', 'rules', ',', 'head', ')', ':', 'these_rules', ',', 'these_leftover', '=', 'self', '.', '_parse_style_rules', '(', 'css_text', ',', 'index', ')', 'rules', '.', 'extend', '(', 'these_rules', ')', 'if', 'head', 'is', 'not', 'None', 'and', '(', 'these_leftover', 'or', 'self', '.', 'keep_style_tags', ')', ':', 'style', '=', 'etree', '.', 'Element', '(', '"style"', ')', 'style', '.', 'attrib', '[', '"type"', ']', '=', '"text/css"', 'if', 'self', '.', 'keep_style_tags', ':', 'style', '.', 'text', '=', 'css_text', 'else', ':', 'style', '.', 'text', '=', 'self', '.', '_css_rules_to_string', '(', 'these_leftover', ')', 'head', '.', 'append', '(', 'style', ')'] | processes the given css_text by adding rules that can be
in-lined to the given rules list and adding any that cannot
be in-lined to the given `<head>` element. | ['processes', 'the', 'given', 'css_text', 'by', 'adding', 'rules', 'that', 'can', 'be', 'in', '-', 'lined', 'to', 'the', 'given', 'rules', 'list', 'and', 'adding', 'any', 'that', 'cannot', 'be', 'in', '-', 'lined', 'to', 'the', 'given', '<head', '>', 'element', '.'] | train | https://github.com/peterbe/premailer/blob/4d74656fb12e8e44683fa787ae71c0735282376b/premailer/premailer.py#L658-L672 |
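This split (inlinable rules onto elements, leftovers back into a <style> tag) is what the public transform exercises. A sketch with made-up HTML; the ':hover' rule cannot be inlined, so it survives in the head:

from premailer import Premailer

html = ('<html><head><style>p { color: red } p:hover { color: blue }'
        '</style></head><body><p>Hi</p></body></html>')
print(Premailer(html).transform())
# <p style="color:red"> ... with the :hover rule kept in a <style> block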
9,858 | quantmind/agile-toolkit | agiletoolkit/manager.py | Manager.manifest | def manifest(self, values, *paths, filename: str = None) -> Dict:
"""Load a manifest file and apply template values
"""
filename = filename or self.filename(*paths)
with open(filename, 'r') as fp:
template = Template(fp.read())
return yaml.load(template.render(values)) | python | def manifest(self, values, *paths, filename: str = None) -> Dict:
"""Load a manifest file and apply template values
"""
filename = filename or self.filename(*paths)
with open(filename, 'r') as fp:
template = Template(fp.read())
return yaml.load(template.render(values)) | ['def', 'manifest', '(', 'self', ',', 'values', ',', '*', 'paths', ',', 'filename', ':', 'str', '=', 'None', ')', '->', 'Dict', ':', 'filename', '=', 'filename', 'or', 'self', '.', 'filename', '(', '*', 'paths', ')', 'with', 'open', '(', 'filename', ',', "'r'", ')', 'as', 'fp', ':', 'template', '=', 'Template', '(', 'fp', '.', 'read', '(', ')', ')', 'return', 'yaml', '.', 'load', '(', 'template', '.', 'render', '(', 'values', ')', ')'] | Load a manifest file and apply template values | ['Load', 'a', 'manifest', 'file', 'and', 'apply', 'template', 'values'] | train | https://github.com/quantmind/agile-toolkit/blob/96028e36a842c57b171907c20583a60d1045fec1/agiletoolkit/manager.py#L48-L54 |
9,859 | deepmind/sonnet | sonnet/python/modules/basic.py | BatchReshape.transpose | def transpose(self, name=None):
"""Returns transpose batch reshape."""
if name is None:
name = self.module_name + "_transpose"
return BatchReshape(shape=lambda: self.input_shape,
preserve_dims=self._preserve_dims,
name=name) | python | def transpose(self, name=None):
"""Returns transpose batch reshape."""
if name is None:
name = self.module_name + "_transpose"
return BatchReshape(shape=lambda: self.input_shape,
preserve_dims=self._preserve_dims,
name=name) | ['def', 'transpose', '(', 'self', ',', 'name', '=', 'None', ')', ':', 'if', 'name', 'is', 'None', ':', 'name', '=', 'self', '.', 'module_name', '+', '"_transpose"', 'return', 'BatchReshape', '(', 'shape', '=', 'lambda', ':', 'self', '.', 'input_shape', ',', 'preserve_dims', '=', 'self', '.', '_preserve_dims', ',', 'name', '=', 'name', ')'] | Returns transpose batch reshape. | ['Returns', 'transpose', 'batch', 'reshape', '.'] | train | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/basic.py#L868-L874 |
9,860 | dwavesystems/penaltymodel | penaltymodel_maxgap/penaltymodel/maxgap/smt.py | Table.message_upperbound | def message_upperbound(self, tree, spins, subtheta):
"""Determine an upper bound on the energy of the elimination tree.
Args:
tree (dict): The current elimination tree
spins (dict): The current fixed spins
subtheta (dict): Theta with spins fixed.
Returns:
The formula for the energy of the tree.
"""
energy_sources = set()
for v, subtree in tree.items():
assert all(u in spins for u in self._ancestors[v])
# build an iterable over all of the energies contributions
# that we can exactly determine given v and our known spins
# in these contributions we assume that v is positive
def energy_contributions():
yield subtheta.linear[v]
for u, bias in subtheta.adj[v].items():
if u in spins:
yield Times(limitReal(spins[u]), bias)
energy = Plus(energy_contributions())
# if there are no more variables in the order, we can stop
# otherwise we need the next message variable
if subtree:
spins[v] = 1.
plus = self.message_upperbound(subtree, spins, subtheta)
spins[v] = -1.
minus = self.message_upperbound(subtree, spins, subtheta)
del spins[v]
else:
plus = minus = limitReal(0.0)
# we now need a real-valued smt variable to be our message
m = FreshSymbol(REAL)
self.assertions.update({LE(m, Plus(energy, plus)),
LE(m, Plus(Times(energy, limitReal(-1.)), minus))})
energy_sources.add(m)
return Plus(energy_sources) | python | def message_upperbound(self, tree, spins, subtheta):
"""Determine an upper bound on the energy of the elimination tree.
Args:
tree (dict): The current elimination tree
spins (dict): The current fixed spins
subtheta (dict): Theta with spins fixed.
Returns:
The formula for the energy of the tree.
"""
energy_sources = set()
for v, subtree in tree.items():
assert all(u in spins for u in self._ancestors[v])
# build an iterable over all of the energies contributions
# that we can exactly determine given v and our known spins
# in these contributions we assume that v is positive
def energy_contributions():
yield subtheta.linear[v]
for u, bias in subtheta.adj[v].items():
if u in spins:
yield Times(limitReal(spins[u]), bias)
energy = Plus(energy_contributions())
# if there are no more variables in the order, we can stop
# otherwise we need the next message variable
if subtree:
spins[v] = 1.
plus = self.message_upperbound(subtree, spins, subtheta)
spins[v] = -1.
minus = self.message_upperbound(subtree, spins, subtheta)
del spins[v]
else:
plus = minus = limitReal(0.0)
# we now need a real-valued smt variable to be our message
m = FreshSymbol(REAL)
self.assertions.update({LE(m, Plus(energy, plus)),
LE(m, Plus(Times(energy, limitReal(-1.)), minus))})
energy_sources.add(m)
return Plus(energy_sources) | ['def', 'message_upperbound', '(', 'self', ',', 'tree', ',', 'spins', ',', 'subtheta', ')', ':', 'energy_sources', '=', 'set', '(', ')', 'for', 'v', ',', 'subtree', 'in', 'tree', '.', 'items', '(', ')', ':', 'assert', 'all', '(', 'u', 'in', 'spins', 'for', 'u', 'in', 'self', '.', '_ancestors', '[', 'v', ']', ')', '# build an iterable over all of the energies contributions', '# that we can exactly determine given v and our known spins', '# in these contributions we assume that v is positive', 'def', 'energy_contributions', '(', ')', ':', 'yield', 'subtheta', '.', 'linear', '[', 'v', ']', 'for', 'u', ',', 'bias', 'in', 'subtheta', '.', 'adj', '[', 'v', ']', '.', 'items', '(', ')', ':', 'if', 'u', 'in', 'spins', ':', 'yield', 'Times', '(', 'limitReal', '(', 'spins', '[', 'u', ']', ')', ',', 'bias', ')', 'energy', '=', 'Plus', '(', 'energy_contributions', '(', ')', ')', '# if there are no more variables in the order, we can stop', '# otherwise we need the next message variable', 'if', 'subtree', ':', 'spins', '[', 'v', ']', '=', '1.', 'plus', '=', 'self', '.', 'message_upperbound', '(', 'subtree', ',', 'spins', ',', 'subtheta', ')', 'spins', '[', 'v', ']', '=', '-', '1.', 'minus', '=', 'self', '.', 'message_upperbound', '(', 'subtree', ',', 'spins', ',', 'subtheta', ')', 'del', 'spins', '[', 'v', ']', 'else', ':', 'plus', '=', 'minus', '=', 'limitReal', '(', '0.0', ')', '# we now need a real-valued smt variable to be our message', 'm', '=', 'FreshSymbol', '(', 'REAL', ')', 'self', '.', 'assertions', '.', 'update', '(', '{', 'LE', '(', 'm', ',', 'Plus', '(', 'energy', ',', 'plus', ')', ')', ',', 'LE', '(', 'm', ',', 'Plus', '(', 'Times', '(', 'energy', ',', 'limitReal', '(', '-', '1.', ')', ')', ',', 'minus', ')', ')', '}', ')', 'energy_sources', '.', 'add', '(', 'm', ')', 'return', 'Plus', '(', 'energy_sources', ')'] | Determine an upper bound on the energy of the elimination tree.
Args:
tree (dict): The current elimination tree
spins (dict): The current fixed spins
subtheta (dict): Theta with spins fixed.
Returns:
The formula for the energy of the tree. | ['Determine', 'an', 'upper', 'bound', 'on', 'the', 'energy', 'of', 'the', 'elimination', 'tree', '.'] | train | https://github.com/dwavesystems/penaltymodel/blob/b9d343233aea8df0f59cea45a07f12d0b3b8d9b3/penaltymodel_maxgap/penaltymodel/maxgap/smt.py#L255-L303 |
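The key move is the fresh real-valued message variable bounded by the energy of both spin branches. The constraint shape in a few pysmt lines, assuming an SMT solver is installed and using constant stand-ins for the energy terms:

from pysmt.shortcuts import And, FreshSymbol, LE, Plus, Real, Times, get_model
from pysmt.typing import REAL

energy = Real(0.5)        # stand-in for the fixed-spin energy sum
plus = minus = Real(0.0)  # stand-ins for the subtree messages
m = FreshSymbol(REAL)     # the message variable
bounds = And(LE(m, Plus(energy, plus)),
             LE(m, Plus(Times(energy, Real(-1.0)), minus)))
model = get_model(bounds)
print(model.get_value(m))  # any value <= min(0.5, -0.5)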
9,861 | osrg/ryu | ryu/cmd/of_config_cli.py | Cmd.do_list_cap | def do_list_cap(self, line):
"""list_cap <peer>
"""
def f(p, args):
for i in p.netconf.server_capabilities:
print(i)
self._request(line, f) | python | def do_list_cap(self, line):
"""list_cap <peer>
"""
def f(p, args):
for i in p.netconf.server_capabilities:
print(i)
self._request(line, f) | ['def', 'do_list_cap', '(', 'self', ',', 'line', ')', ':', 'def', 'f', '(', 'p', ',', 'args', ')', ':', 'for', 'i', 'in', 'p', '.', 'netconf', '.', 'server_capabilities', ':', 'print', '(', 'i', ')', 'self', '.', '_request', '(', 'line', ',', 'f', ')'] | list_cap <peer> | ['list_cap', '<peer', '>'] | train | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/cmd/of_config_cli.py#L104-L112 |
9,862 | googleapis/google-cloud-python | dns/google/cloud/dns/changes.py | Changes.delete_record_set | def delete_record_set(self, record_set):
"""Append a record set to the 'deletions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
"""
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._deletions += (record_set,) | python | def delete_record_set(self, record_set):
"""Append a record set to the 'deletions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
"""
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._deletions += (record_set,) | ['def', 'delete_record_set', '(', 'self', ',', 'record_set', ')', ':', 'if', 'not', 'isinstance', '(', 'record_set', ',', 'ResourceRecordSet', ')', ':', 'raise', 'ValueError', '(', '"Pass a ResourceRecordSet"', ')', 'self', '.', '_deletions', '+=', '(', 'record_set', ',', ')'] | Append a record set to the 'deletions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type. | ['Append', 'a', 'record', 'set', 'to', 'the', 'deletions', 'for', 'the', 'change', 'set', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dns/google/cloud/dns/changes.py#L165-L176 |
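A rough usage sketch against the google-cloud-dns surface, with placeholder project, zone, and records; delete_record_set must be given a record that matches the live one exactly:

from google.cloud import dns

client = dns.Client(project="my-project")
zone = client.zone("example-zone", "example.com.")
old = zone.resource_record_set("www.example.com.", "A", 300, ["10.0.0.1"])
new = zone.resource_record_set("www.example.com.", "A", 300, ["10.0.0.2"])

changes = zone.changes()
changes.delete_record_set(old)
changes.add_record_set(new)
changes.create()   # deletions and additions apply atomically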
9,863 | dedupeio/dedupe | dedupe/api.py | GazetteerMatching.match | def match(self, messy_data, threshold=0.5, n_matches=1, generator=False): # pragma: no cover
"""Identifies pairs of records that refer to the same entity, returns
tuples containing a set of record ids and a confidence score as a float
between 0 and 1. The record_ids within each set should refer to the
same entity and the confidence score is the estimated probability that
the records refer to the same entity.
This method should only be used for small to moderately sized datasets;
for larger data, use matchBlocks
Arguments:
messy_data -- Dictionary of records from messy dataset, where the
keys are record_ids and the values are dictionaries with
the keys being field names
threshold -- Number between 0 and 1 (default is .5). We will consider
records as potential duplicates if the predicted
probability of being a duplicate is above the threshold.
Lowering the number will increase recall, raising it
will increase precision
n_matches -- Maximum number of possible matches from the canonical
record set to match against each record in the messy
record set
"""
blocked_pairs = self._blockData(messy_data)
clusters = self.matchBlocks(blocked_pairs, threshold, n_matches)
clusters = (cluster for cluster in clusters if len(cluster))
if generator:
return clusters
else:
return list(clusters) | python | def match(self, messy_data, threshold=0.5, n_matches=1, generator=False): # pragma: no cover
"""Identifies pairs of records that refer to the same entity, returns
tuples containing a set of record ids and a confidence score as a float
between 0 and 1. The record_ids within each set should refer to the
same entity and the confidence score is the estimated probability that
the records refer to the same entity.
This method should only be used for small to moderately sized datasets;
for larger data, use matchBlocks
Arguments:
messy_data -- Dictionary of records from messy dataset, where the
keys are record_ids and the values are dictionaries with
the keys being field names
threshold -- Number between 0 and 1 (default is .5). We will consider
records as potential duplicates if the predicted
probability of being a duplicate is above the threshold.
Lowering the number will increase recall, raising it
will increase precision
n_matches -- Maximum number of possible matches from the canonical
record set to match against each record in the messy
record set
"""
blocked_pairs = self._blockData(messy_data)
clusters = self.matchBlocks(blocked_pairs, threshold, n_matches)
clusters = (cluster for cluster in clusters if len(cluster))
if generator:
return clusters
else:
return list(clusters) | ['def', 'match', '(', 'self', ',', 'messy_data', ',', 'threshold', '=', '0.5', ',', 'n_matches', '=', '1', ',', 'generator', '=', 'False', ')', ':', '# pragma: no cover', 'blocked_pairs', '=', 'self', '.', '_blockData', '(', 'messy_data', ')', 'clusters', '=', 'self', '.', 'matchBlocks', '(', 'blocked_pairs', ',', 'threshold', ',', 'n_matches', ')', 'clusters', '=', '(', 'cluster', 'for', 'cluster', 'in', 'clusters', 'if', 'len', '(', 'cluster', ')', ')', 'if', 'generator', ':', 'return', 'clusters', 'else', ':', 'return', 'list', '(', 'clusters', ')'] | Identifies pairs of records that refer to the same entity, returns
tuples containing a set of record ids and a confidence score as a float
between 0 and 1. The record_ids within each set should refer to the
same entity and the confidence score is the estimated probability that
the records refer to the same entity.
        This method should only be used for small to moderately sized datasets;
        for larger data, use matchBlocks.
Arguments:
messy_data -- Dictionary of records from messy dataset, where the
keys are record_ids and the values are dictionaries with
the keys being field names
threshold -- Number between 0 and 1 (default is .5). We will consider
records as potential duplicates if the predicted
probability of being a duplicate is above the threshold.
Lowering the number will increase recall, raising it
will increase precision
n_matches -- Maximum number of possible matches from the canonical
record set to match against each record in the messy
record set | ['Identifies', 'pairs', 'of', 'records', 'that', 'refer', 'to', 'the', 'same', 'entity', 'returns', 'tuples', 'containing', 'a', 'set', 'of', 'record', 'ids', 'and', 'a', 'confidence', 'score', 'as', 'a', 'float', 'between', '0', 'and', '1', '.', 'The', 'record_ids', 'within', 'each', 'set', 'should', 'refer', 'to', 'the', 'same', 'entity', 'and', 'the', 'confidence', 'score', 'is', 'the', 'estimated', 'probability', 'that', 'the', 'records', 'refer', 'to', 'the', 'same', 'entity', '.'] | train | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/api.py#L920-L955 |
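A minimal usage sketch of the method above (assuming a trained and indexed dedupe Gazetteer instance named `gazetteer`; the records, and the ((messy_id, canonical_id), score) cluster layout taken from the matchBlocks output, are illustrative):

# Messy records keyed by record id; field names must match the trained model.
messy_data = {
    'm1': {'name': 'Acme Corp', 'address': '1 Main St'},
    'm2': {'name': 'ACME Corporation', 'address': '1 Main Street'},
}

for cluster in gazetteer.match(messy_data, threshold=0.5, n_matches=1):
    for (messy_id, canonical_id), score in cluster:
        print(messy_id, canonical_id, score)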
9,864 | has2k1/plydata | plydata/eval.py | EvalEnvironment.capture | def capture(cls, eval_env=0, reference=0):
"""Capture an execution environment from the stack.
If `eval_env` is already an :class:`EvalEnvironment`, it is returned
unchanged. Otherwise, we walk up the stack by ``eval_env + reference``
steps and capture that function's evaluation environment.
For ``eval_env=0`` and ``reference=0``, the default, this captures the
stack frame of the function that calls :meth:`capture`. If ``eval_env
+ reference`` is 1, then we capture that function's caller, etc.
This somewhat complicated calling convention is designed to be
convenient for functions which want to capture their caller's
environment by default, but also allow explicit environments to be
specified. See the second example.
Example::
x = 1
this_env = EvalEnvironment.capture()
assert this_env.namespace["x"] == 1
def child_func():
return EvalEnvironment.capture(1)
this_env_from_child = child_func()
assert this_env_from_child.namespace["x"] == 1
Example::
# This function can be used like:
# my_model(formula_like, data)
# -> evaluates formula_like in caller's environment
# my_model(formula_like, data, eval_env=1)
# -> evaluates formula_like in caller's caller's environment
# my_model(formula_like, data, eval_env=my_env)
# -> evaluates formula_like in environment 'my_env'
def my_model(formula_like, data, eval_env=0):
eval_env = EvalEnvironment.capture(eval_env, reference=1)
return model_setup_helper(formula_like, data, eval_env)
This is how :func:`dmatrix` works.
    .. versionadded:: 0.2.0
The ``reference`` argument.
"""
if isinstance(eval_env, cls):
return eval_env
elif isinstance(eval_env, numbers.Integral):
depth = eval_env + reference
else:
raise TypeError("Parameter 'eval_env' must be either an integer "
"or an instance of patsy.EvalEnvironment.")
frame = inspect.currentframe()
try:
for i in range(depth + 1):
if frame is None:
raise ValueError("call-stack is not that deep!")
frame = frame.f_back
return cls([frame.f_locals, frame.f_globals],
frame.f_code.co_flags & _ALL_FUTURE_FLAGS)
# The try/finally is important to avoid a potential reference cycle --
# any exception traceback will carry a reference to *our* frame, which
# contains a reference to our local variables, which would otherwise
# carry a reference to some parent frame, where the exception was
# caught...:
finally:
del frame | python | def capture(cls, eval_env=0, reference=0):
"""Capture an execution environment from the stack.
If `eval_env` is already an :class:`EvalEnvironment`, it is returned
unchanged. Otherwise, we walk up the stack by ``eval_env + reference``
steps and capture that function's evaluation environment.
For ``eval_env=0`` and ``reference=0``, the default, this captures the
stack frame of the function that calls :meth:`capture`. If ``eval_env
+ reference`` is 1, then we capture that function's caller, etc.
This somewhat complicated calling convention is designed to be
convenient for functions which want to capture their caller's
environment by default, but also allow explicit environments to be
specified. See the second example.
Example::
x = 1
this_env = EvalEnvironment.capture()
assert this_env.namespace["x"] == 1
def child_func():
return EvalEnvironment.capture(1)
this_env_from_child = child_func()
assert this_env_from_child.namespace["x"] == 1
Example::
# This function can be used like:
# my_model(formula_like, data)
# -> evaluates formula_like in caller's environment
# my_model(formula_like, data, eval_env=1)
# -> evaluates formula_like in caller's caller's environment
# my_model(formula_like, data, eval_env=my_env)
# -> evaluates formula_like in environment 'my_env'
def my_model(formula_like, data, eval_env=0):
eval_env = EvalEnvironment.capture(eval_env, reference=1)
return model_setup_helper(formula_like, data, eval_env)
This is how :func:`dmatrix` works.
    .. versionadded:: 0.2.0
The ``reference`` argument.
"""
if isinstance(eval_env, cls):
return eval_env
elif isinstance(eval_env, numbers.Integral):
depth = eval_env + reference
else:
raise TypeError("Parameter 'eval_env' must be either an integer "
"or an instance of patsy.EvalEnvironment.")
frame = inspect.currentframe()
try:
for i in range(depth + 1):
if frame is None:
raise ValueError("call-stack is not that deep!")
frame = frame.f_back
return cls([frame.f_locals, frame.f_globals],
frame.f_code.co_flags & _ALL_FUTURE_FLAGS)
# The try/finally is important to avoid a potential reference cycle --
# any exception traceback will carry a reference to *our* frame, which
# contains a reference to our local variables, which would otherwise
# carry a reference to some parent frame, where the exception was
# caught...:
finally:
del frame | ['def', 'capture', '(', 'cls', ',', 'eval_env', '=', '0', ',', 'reference', '=', '0', ')', ':', 'if', 'isinstance', '(', 'eval_env', ',', 'cls', ')', ':', 'return', 'eval_env', 'elif', 'isinstance', '(', 'eval_env', ',', 'numbers', '.', 'Integral', ')', ':', 'depth', '=', 'eval_env', '+', 'reference', 'else', ':', 'raise', 'TypeError', '(', '"Parameter \'eval_env\' must be either an integer "', '"or an instance of patsy.EvalEnvironment."', ')', 'frame', '=', 'inspect', '.', 'currentframe', '(', ')', 'try', ':', 'for', 'i', 'in', 'range', '(', 'depth', '+', '1', ')', ':', 'if', 'frame', 'is', 'None', ':', 'raise', 'ValueError', '(', '"call-stack is not that deep!"', ')', 'frame', '=', 'frame', '.', 'f_back', 'return', 'cls', '(', '[', 'frame', '.', 'f_locals', ',', 'frame', '.', 'f_globals', ']', ',', 'frame', '.', 'f_code', '.', 'co_flags', '&', '_ALL_FUTURE_FLAGS', ')', '# The try/finally is important to avoid a potential reference cycle --', '# any exception traceback will carry a reference to *our* frame, which', '# contains a reference to our local variables, which would otherwise', '# carry a reference to some parent frame, where the exception was', '# caught...:', 'finally', ':', 'del', 'frame'] | Capture an execution environment from the stack.
If `eval_env` is already an :class:`EvalEnvironment`, it is returned
unchanged. Otherwise, we walk up the stack by ``eval_env + reference``
steps and capture that function's evaluation environment.
For ``eval_env=0`` and ``reference=0``, the default, this captures the
stack frame of the function that calls :meth:`capture`. If ``eval_env
+ reference`` is 1, then we capture that function's caller, etc.
This somewhat complicated calling convention is designed to be
convenient for functions which want to capture their caller's
environment by default, but also allow explicit environments to be
specified. See the second example.
Example::
x = 1
this_env = EvalEnvironment.capture()
assert this_env.namespace["x"] == 1
def child_func():
return EvalEnvironment.capture(1)
this_env_from_child = child_func()
assert this_env_from_child.namespace["x"] == 1
Example::
# This function can be used like:
# my_model(formula_like, data)
# -> evaluates formula_like in caller's environment
# my_model(formula_like, data, eval_env=1)
# -> evaluates formula_like in caller's caller's environment
# my_model(formula_like, data, eval_env=my_env)
# -> evaluates formula_like in environment 'my_env'
def my_model(formula_like, data, eval_env=0):
eval_env = EvalEnvironment.capture(eval_env, reference=1)
return model_setup_helper(formula_like, data, eval_env)
This is how :func:`dmatrix` works.
    .. versionadded:: 0.2.0
The ``reference`` argument. | ['Capture', 'an', 'execution', 'environment', 'from', 'the', 'stack', '.', 'If', 'eval_env', 'is', 'already', 'an', ':', 'class', ':', 'EvalEnvironment', 'it', 'is', 'returned', 'unchanged', '.', 'Otherwise', 'we', 'walk', 'up', 'the', 'stack', 'by', 'eval_env', '+', 'reference', 'steps', 'and', 'capture', 'that', 'function', 's', 'evaluation', 'environment', '.', 'For', 'eval_env', '=', '0', 'and', 'reference', '=', '0', 'the', 'default', 'this', 'captures', 'the', 'stack', 'frame', 'of', 'the', 'function', 'that', 'calls', ':', 'meth', ':', 'capture', '.', 'If', 'eval_env', '+', 'reference', 'is', '1', 'then', 'we', 'capture', 'that', 'function', 's', 'caller', 'etc', '.', 'This', 'somewhat', 'complicated', 'calling', 'convention', 'is', 'designed', 'to', 'be', 'convenient', 'for', 'functions', 'which', 'want', 'to', 'capture', 'their', 'caller', 's', 'environment', 'by', 'default', 'but', 'also', 'allow', 'explicit', 'environments', 'to', 'be', 'specified', '.', 'See', 'the', 'second', 'example', '.', 'Example', '::', 'x', '=', '1', 'this_env', '=', 'EvalEnvironment', '.', 'capture', '()', 'assert', 'this_env', '.', 'namespace', '[', 'x', ']', '==', '1', 'def', 'child_func', '()', ':', 'return', 'EvalEnvironment', '.', 'capture', '(', '1', ')', 'this_env_from_child', '=', 'child_func', '()', 'assert', 'this_env_from_child', '.', 'namespace', '[', 'x', ']', '==', '1', 'Example', '::', '#', 'This', 'function', 'can', 'be', 'used', 'like', ':', '#', 'my_model', '(', 'formula_like', 'data', ')', '#', '-', '>', 'evaluates', 'formula_like', 'in', 'caller', 's', 'environment', '#', 'my_model', '(', 'formula_like', 'data', 'eval_env', '=', '1', ')', '#', '-', '>', 'evaluates', 'formula_like', 'in', 'caller', 's', 'caller', 's', 'environment', '#', 'my_model', '(', 'formula_like', 'data', 'eval_env', '=', 'my_env', ')', '#', '-', '>', 'evaluates', 'formula_like', 'in', 'environment', 'my_env', 'def', 'my_model', '(', 'formula_like', 'data', 'eval_env', '=', '0', ')', ':', 'eval_env', '=', 'EvalEnvironment', '.', 'capture', '(', 'eval_env', 'reference', '=', '1', ')', 'return', 'model_setup_helper', '(', 'formula_like', 'data', 'eval_env', ')', 'This', 'is', 'how', ':', 'func', ':', 'dmatrix', 'works', '.', '..', 'versionadded', ':', '0', '.', '2', '.', '0', 'The', 'reference', 'argument', '.'] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/eval.py#L101-L157 |
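The frame walk at the end is the core of capture; the same idea in a standalone sketch (plain stdlib, not the patsy/plydata class itself):

import inspect

def capture_namespace(depth=0):
    # Walk depth + 1 frames up from here and snapshot that caller's
    # locals and globals; the final `del frame` breaks the same
    # traceback reference cycle the comment above describes.
    frame = inspect.currentframe()
    try:
        for _ in range(depth + 1):
            if frame is None:
                raise ValueError('call-stack is not that deep!')
            frame = frame.f_back
        return dict(frame.f_globals, **frame.f_locals)
    finally:
        del frame

x = 1
assert capture_namespace()['x'] == 1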
9,865 | fboender/ansible-cmdb | src/ansiblecmdb/parser.py | HostsParser._parse_vars | def _parse_vars(self, tokens):
"""
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
"""
key_values = {}
for token in tokens:
if token.startswith('#'):
# End parsing if we encounter a comment, which lasts
# until the end of the line.
break
else:
k, v = token.split('=', 1)
key = k.strip()
key_values[key] = v.strip()
return key_values | python | def _parse_vars(self, tokens):
"""
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
"""
key_values = {}
for token in tokens:
if token.startswith('#'):
# End parsing if we encounter a comment, which lasts
# until the end of the line.
break
else:
k, v = token.split('=', 1)
key = k.strip()
key_values[key] = v.strip()
return key_values | ['def', '_parse_vars', '(', 'self', ',', 'tokens', ')', ':', 'key_values', '=', '{', '}', 'for', 'token', 'in', 'tokens', ':', 'if', 'token', '.', 'startswith', '(', "'#'", ')', ':', '# End parsing if we encounter a comment, which lasts', '# until the end of the line.', 'break', 'else', ':', 'k', ',', 'v', '=', 'token', '.', 'split', '(', "'='", ',', '1', ')', 'key', '=', 'k', '.', 'strip', '(', ')', 'key_values', '[', 'key', ']', '=', 'v', '.', 'strip', '(', ')', 'return', 'key_values'] | Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'} | ['Given', 'an', 'iterable', 'of', 'tokens', 'returns', 'variables', 'and', 'their', 'values', 'as', 'a', 'dictionary', '.'] | train | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansiblecmdb/parser.py#L194-L214 |
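The tokens are the whitespace-split remainder of an inventory host line; the parsing logic above can be checked in isolation (hypothetical tokens):

tokens = ['dtap=prod', 'env=staging', '# everything', 'after=a', 'comment']

key_values = {}
for token in tokens:
    if token.startswith('#'):
        break  # comments run to the end of the line
    k, v = token.split('=', 1)
    key_values[k.strip()] = v.strip()

assert key_values == {'dtap': 'prod', 'env': 'staging'}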
9,866 | eqcorrscan/EQcorrscan | eqcorrscan/utils/mag_calc.py | calc_b_value | def calc_b_value(magnitudes, completeness, max_mag=None, plotvar=True):
"""
Calculate the b-value for a range of completeness magnitudes.
Calculates a power-law fit to given magnitudes for each completeness
magnitude. Plots the b-values and residuals for the fitted catalogue
against the completeness values. Computes fits using numpy.polyfit,
which uses a least-squares technique.
:type magnitudes: list
:param magnitudes: Magnitudes to compute the b-value for.
:type completeness: list
:param completeness: list of completeness values to compute b-values for.
:type max_mag: float
:param max_mag: Maximum magnitude to attempt to fit in magnitudes.
:type plotvar: bool
:param plotvar: Turn plotting on or off.
:rtype: list
:return: List of tuples of (completeness, b-value, residual,\
number of magnitudes used)
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.mag_calc import calc_b_value
>>> client = Client('IRIS')
>>> t1 = UTCDateTime('2012-03-26T00:00:00')
>>> t2 = t1 + (3 * 86400)
>>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
>>> magnitudes = [event.magnitudes[0].mag for event in catalog]
>>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
... plotvar=False)
>>> round(b_values[4][1])
1.0
>>> # We can set a maximum magnitude:
>>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
... plotvar=False, max_mag=5)
>>> round(b_values[4][1])
1.0
"""
b_values = []
# Calculate the cdf for all magnitudes
counts = Counter(magnitudes)
cdf = np.zeros(len(counts))
mag_steps = np.zeros(len(counts))
for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)):
mag_steps[i] = magnitude
if i > 0:
cdf[i] = cdf[i - 1] + counts[magnitude]
else:
cdf[i] = counts[magnitude]
if not max_mag:
max_mag = max(magnitudes)
for m_c in completeness:
if m_c >= max_mag or m_c >= max(magnitudes):
warnings.warn('Not computing completeness at %s, above max_mag' %
str(m_c))
break
complete_mags = []
complete_freq = []
for i, mag in enumerate(mag_steps):
if mag >= m_c <= max_mag:
complete_mags.append(mag)
complete_freq.append(np.log10(cdf[i]))
if len(complete_mags) < 4:
warnings.warn('Not computing completeness above ' + str(m_c) +
', fewer than 4 events')
break
fit = np.polyfit(complete_mags, complete_freq, 1, full=True)
        # Calculate the residuals according to the Wiemer & Wyss 2000 definition
predicted_freqs = [fit[0][1] - abs(fit[0][0] * M)
for M in complete_mags]
r = 100 - ((np.sum([abs(complete_freq[i] - predicted_freqs[i])
for i in range(len(complete_freq))]) * 100) /
np.sum(complete_freq))
b_values.append((m_c, abs(fit[0][0]), r, str(len(complete_mags))))
if plotvar:
fig, ax1 = plt.subplots()
b_vals = ax1.scatter(list(zip(*b_values))[0], list(zip(*b_values))[1],
c='k')
resid = ax1.scatter(list(zip(*b_values))[0],
[100 - b for b in list(zip(*b_values))[2]], c='r')
ax1.set_ylabel('b-value and residual')
plt.xlabel('Completeness magnitude')
ax2 = ax1.twinx()
ax2.set_ylabel('Number of events used in fit')
n_ev = ax2.scatter(list(zip(*b_values))[0], list(zip(*b_values))[3],
c='g')
fig.legend((b_vals, resid, n_ev),
('b-values', 'residuals', 'number of events'),
'lower right')
ax1.set_title('Possible completeness values')
plt.show()
return b_values | python | def calc_b_value(magnitudes, completeness, max_mag=None, plotvar=True):
"""
Calculate the b-value for a range of completeness magnitudes.
Calculates a power-law fit to given magnitudes for each completeness
magnitude. Plots the b-values and residuals for the fitted catalogue
against the completeness values. Computes fits using numpy.polyfit,
which uses a least-squares technique.
:type magnitudes: list
:param magnitudes: Magnitudes to compute the b-value for.
:type completeness: list
:param completeness: list of completeness values to compute b-values for.
:type max_mag: float
:param max_mag: Maximum magnitude to attempt to fit in magnitudes.
:type plotvar: bool
:param plotvar: Turn plotting on or off.
:rtype: list
:return: List of tuples of (completeness, b-value, residual,\
number of magnitudes used)
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.mag_calc import calc_b_value
>>> client = Client('IRIS')
>>> t1 = UTCDateTime('2012-03-26T00:00:00')
>>> t2 = t1 + (3 * 86400)
>>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
>>> magnitudes = [event.magnitudes[0].mag for event in catalog]
>>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
... plotvar=False)
>>> round(b_values[4][1])
1.0
>>> # We can set a maximum magnitude:
>>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
... plotvar=False, max_mag=5)
>>> round(b_values[4][1])
1.0
"""
b_values = []
# Calculate the cdf for all magnitudes
counts = Counter(magnitudes)
cdf = np.zeros(len(counts))
mag_steps = np.zeros(len(counts))
for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)):
mag_steps[i] = magnitude
if i > 0:
cdf[i] = cdf[i - 1] + counts[magnitude]
else:
cdf[i] = counts[magnitude]
if not max_mag:
max_mag = max(magnitudes)
for m_c in completeness:
if m_c >= max_mag or m_c >= max(magnitudes):
warnings.warn('Not computing completeness at %s, above max_mag' %
str(m_c))
break
complete_mags = []
complete_freq = []
for i, mag in enumerate(mag_steps):
if mag >= m_c <= max_mag:
complete_mags.append(mag)
complete_freq.append(np.log10(cdf[i]))
if len(complete_mags) < 4:
warnings.warn('Not computing completeness above ' + str(m_c) +
', fewer than 4 events')
break
fit = np.polyfit(complete_mags, complete_freq, 1, full=True)
        # Calculate the residuals according to the Wiemer & Wyss 2000 definition
predicted_freqs = [fit[0][1] - abs(fit[0][0] * M)
for M in complete_mags]
r = 100 - ((np.sum([abs(complete_freq[i] - predicted_freqs[i])
for i in range(len(complete_freq))]) * 100) /
np.sum(complete_freq))
b_values.append((m_c, abs(fit[0][0]), r, str(len(complete_mags))))
if plotvar:
fig, ax1 = plt.subplots()
b_vals = ax1.scatter(list(zip(*b_values))[0], list(zip(*b_values))[1],
c='k')
resid = ax1.scatter(list(zip(*b_values))[0],
[100 - b for b in list(zip(*b_values))[2]], c='r')
ax1.set_ylabel('b-value and residual')
plt.xlabel('Completeness magnitude')
ax2 = ax1.twinx()
ax2.set_ylabel('Number of events used in fit')
n_ev = ax2.scatter(list(zip(*b_values))[0], list(zip(*b_values))[3],
c='g')
fig.legend((b_vals, resid, n_ev),
('b-values', 'residuals', 'number of events'),
'lower right')
ax1.set_title('Possible completeness values')
plt.show()
    return b_values | ['def', 'calc_b_value', '(', 'magnitudes', ',', 'completeness', ',', 'max_mag', '=', 'None', ',', 'plotvar', '=', 'True', ')', ':', 'b_values', '=', '[', ']', '# Calculate the cdf for all magnitudes', 'counts', '=', 'Counter', '(', 'magnitudes', ')', 'cdf', '=', 'np', '.', 'zeros', '(', 'len', '(', 'counts', ')', ')', 'mag_steps', '=', 'np', '.', 'zeros', '(', 'len', '(', 'counts', ')', ')', 'for', 'i', ',', 'magnitude', 'in', 'enumerate', '(', 'sorted', '(', 'counts', '.', 'keys', '(', ')', ',', 'reverse', '=', 'True', ')', ')', ':', 'mag_steps', '[', 'i', ']', '=', 'magnitude', 'if', 'i', '>', '0', ':', 'cdf', '[', 'i', ']', '=', 'cdf', '[', 'i', '-', '1', ']', '+', 'counts', '[', 'magnitude', ']', 'else', ':', 'cdf', '[', 'i', ']', '=', 'counts', '[', 'magnitude', ']', 'if', 'not', 'max_mag', ':', 'max_mag', '=', 'max', '(', 'magnitudes', ')', 'for', 'm_c', 'in', 'completeness', ':', 'if', 'm_c', '>=', 'max_mag', 'or', 'm_c', '>=', 'max', '(', 'magnitudes', ')', ':', 'warnings', '.', 'warn', '(', "'Not computing completeness at %s, above max_mag'", '%', 'str', '(', 'm_c', ')', ')', 'break', 'complete_mags', '=', '[', ']', 'complete_freq', '=', '[', ']', 'for', 'i', ',', 'mag', 'in', 'enumerate', '(', 'mag_steps', ')', ':', 'if', 'mag', '>=', 'm_c', '<=', 'max_mag', ':', 'complete_mags', '.', 'append', '(', 'mag', ')', 'complete_freq', '.', 'append', '(', 'np', '.', 'log10', '(', 'cdf', '[', 'i', ']', ')', ')', 'if', 'len', '(', 'complete_mags', ')', '<', '4', ':', 'warnings', '.', 'warn', '(', "'Not computing completeness above '", '+', 'str', '(', 'm_c', ')', '+', "', fewer than 4 events'", ')', 'break', 'fit', '=', 'np', '.', 'polyfit', '(', 'complete_mags', ',', 'complete_freq', ',', '1', ',', 'full', '=', 'True', ')', '# Calculate the residuals according to the Wiemer & Wyss 2000 definition', 'predicted_freqs', '=', '[', 'fit', '[', '0', ']', '[', '1', ']', '-', 'abs', '(', 'fit', '[', '0', ']', '[', '0', ']', '*', 'M', ')', 'for', 'M', 'in', 'complete_mags', ']', 'r', '=', '100', '-', '(', '(', 'np', '.', 'sum', '(', '[', 'abs', '(', 'complete_freq', '[', 'i', ']', '-', 'predicted_freqs', '[', 'i', ']', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'complete_freq', ')', ')', ']', ')', '*', '100', ')', '/', 'np', '.', 'sum', '(', 'complete_freq', ')', ')', 'b_values', '.', 'append', '(', '(', 'm_c', ',', 'abs', '(', 'fit', '[', '0', ']', '[', '0', ']', ')', ',', 'r', ',', 'str', '(', 'len', '(', 'complete_mags', ')', ')', ')', ')', 'if', 'plotvar', ':', 'fig', ',', 'ax1', '=', 'plt', '.', 'subplots', '(', ')', 'b_vals', '=', 'ax1', '.', 'scatter', '(', 'list', '(', 'zip', '(', '*', 'b_values', ')', ')', '[', '0', ']', ',', 'list', '(', 'zip', '(', '*', 'b_values', ')', ')', '[', '1', ']', ',', 'c', '=', "'k'", ')', 'resid', '=', 'ax1', '.', 'scatter', '(', 'list', '(', 'zip', '(', '*', 'b_values', ')', ')', '[', '0', ']', ',', '[', '100', '-', 'b', 'for', 'b', 'in', 'list', '(', 'zip', '(', '*', 'b_values', ')', ')', '[', '2', ']', ']', ',', 'c', '=', "'r'", ')', 'ax1', '.', 'set_ylabel', '(', "'b-value and residual'", ')', 'plt', '.', 'xlabel', '(', "'Completeness magnitude'", ')', 'ax2', '=', 'ax1', '.', 'twinx', '(', ')', 'ax2', '.', 'set_ylabel', '(', "'Number of events used in fit'", ')', 'n_ev', '=', 'ax2', '.', 'scatter', '(', 'list', '(', 'zip', '(', '*', 'b_values', ')', ')', '[', '0', ']', ',', 'list', '(', 'zip', '(', '*', 'b_values', ')', ')', '[', '3', ']', ',', 'c', '=', "'g'", ')', 'fig', '.', 'legend', '(', '(', 'b_vals', ',', 'resid', ',', 'n_ev', ')', ',', '(', "'b-values'", ',', "'residuals'", ',', "'number of events'", ')', ',', "'lower right'", ')', 'ax1', '.', 'set_title', '(', "'Possible completeness values'", ')', 'plt', '.', 'show', '(', ')', 'return', 'b_values'] | Calculate the b-value for a range of completeness magnitudes.
Calculates a power-law fit to given magnitudes for each completeness
magnitude. Plots the b-values and residuals for the fitted catalogue
against the completeness values. Computes fits using numpy.polyfit,
which uses a least-squares technique.
:type magnitudes: list
:param magnitudes: Magnitudes to compute the b-value for.
:type completeness: list
:param completeness: list of completeness values to compute b-values for.
:type max_mag: float
:param max_mag: Maximum magnitude to attempt to fit in magnitudes.
:type plotvar: bool
:param plotvar: Turn plotting on or off.
:rtype: list
:return: List of tuples of (completeness, b-value, residual,\
number of magnitudes used)
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.mag_calc import calc_b_value
>>> client = Client('IRIS')
>>> t1 = UTCDateTime('2012-03-26T00:00:00')
>>> t2 = t1 + (3 * 86400)
>>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
>>> magnitudes = [event.magnitudes[0].mag for event in catalog]
>>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
... plotvar=False)
>>> round(b_values[4][1])
1.0
>>> # We can set a maximum magnitude:
>>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
... plotvar=False, max_mag=5)
>>> round(b_values[4][1])
1.0 | ['Calculate', 'the', 'b', '-', 'value', 'for', 'a', 'range', 'of', 'completeness', 'magnitudes', '.'] | train | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L132-L228 |
9,867 | carpedm20/fbchat | fbchat/_client.py | Client.setDefaultThread | def setDefaultThread(self, thread_id, thread_type):
"""
Sets default thread to send messages to
:param thread_id: User/Group ID to default to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
"""
self._default_thread_id = thread_id
self._default_thread_type = thread_type | python | def setDefaultThread(self, thread_id, thread_type):
"""
Sets default thread to send messages to
:param thread_id: User/Group ID to default to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
"""
self._default_thread_id = thread_id
self._default_thread_type = thread_type | ['def', 'setDefaultThread', '(', 'self', ',', 'thread_id', ',', 'thread_type', ')', ':', 'self', '.', '_default_thread_id', '=', 'thread_id', 'self', '.', '_default_thread_type', '=', 'thread_type'] | Sets default thread to send messages to
:param thread_id: User/Group ID to default to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType | ['Sets', 'default', 'thread', 'to', 'send', 'messages', 'to'] | train | https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L550-L559 |
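Once a default thread is set, later sends can omit the thread arguments; a short sketch (placeholder credentials and thread id, assuming fbchat's send and resetDefaultThread counterparts):

from fbchat import Client
from fbchat.models import Message, ThreadType

client = Client('<email>', '<password>')
client.setDefaultThread('1234567890', ThreadType.USER)
client.send(Message(text='Hello!'))  # delivered to the default thread
client.resetDefaultThread()          # back to requiring explicit ids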
9,868 | dpa-newslab/livebridge | livebridge/base/sources.py | BaseSource._db | def _db(self):
"""Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` """
if not hasattr(self, "_db_client") or getattr(self, "_db_client") is None:
self._db_client = get_db_client()
return self._db_client | python | def _db(self):
"""Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` """
if not hasattr(self, "_db_client") or getattr(self, "_db_client") is None:
self._db_client = get_db_client()
return self._db_client | ['def', '_db', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', '"_db_client"', ')', 'or', 'getattr', '(', 'self', ',', '"_db_client"', ')', 'is', 'None', ':', 'self', '.', '_db_client', '=', 'get_db_client', '(', ')', 'return', 'self', '.', '_db_client'] | Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` | ['Database', 'client', 'for', 'accessing', 'storage', '.'] | train | https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/base/sources.py#L38-L44 |
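This is the usual lazy-initialization property; the pattern in isolation (generic sketch, independent of livebridge):

class LazyClient:
    _client = None

    @property
    def client(self):
        # Build the expensive resource on first access, then reuse it.
        if self._client is None:
            self._client = object()  # stand-in for get_db_client()
        return self._client

svc = LazyClient()
assert svc.client is svc.client  # one instance, created once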
9,869 | roclark/sportsreference | sportsreference/nfl/boxscore.py | Boxscore.home_abbreviation | def home_abbreviation(self):
"""
Returns a ``string`` of the home team's abbreviation, such as 'KAN'.
"""
abbr = re.sub(r'.*/teams/', '', str(self._home_name))
abbr = re.sub(r'/.*', '', abbr)
return abbr | python | def home_abbreviation(self):
"""
Returns a ``string`` of the home team's abbreviation, such as 'KAN'.
"""
abbr = re.sub(r'.*/teams/', '', str(self._home_name))
abbr = re.sub(r'/.*', '', abbr)
return abbr | ['def', 'home_abbreviation', '(', 'self', ')', ':', 'abbr', '=', 're', '.', 'sub', '(', "r'.*/teams/'", ',', "''", ',', 'str', '(', 'self', '.', '_home_name', ')', ')', 'abbr', '=', 're', '.', 'sub', '(', "r'/.*'", ',', "''", ',', 'abbr', ')', 'return', 'abbr'] | Returns a ``string`` of the home team's abbreviation, such as 'KAN'. | ['Returns', 'a', 'string', 'of', 'the', 'home', 'team', 's', 'abbreviation', 'such', 'as', 'KAN', '.'] | train | https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nfl/boxscore.py#L746-L752 |
9,870 | openstack/proliantutils | proliantutils/hpssa/objects.py | _get_key_value | def _get_key_value(string):
"""Return the (key, value) as a tuple from a string."""
# Normally all properties look like this:
# Unique Identifier: 600508B1001CE4ACF473EE9C826230FF
# Disk Name: /dev/sda
# Mount Points: None
key = ''
value = ''
try:
key, value = string.split(': ')
except ValueError:
# This handles the case when the property of a logical drive
# returned is as follows. Here we cannot split by ':' because
# the disk id has colon in it. So if this is about disk,
# then strip it accordingly.
# Mirror Group 0: physicaldrive 6I:1:5
string = string.lstrip(' ')
if string.startswith('physicaldrive'):
fields = string.split(' ')
# Include fields[1] to key to avoid duplicate pairs
# with the same 'physicaldrive' key
key = fields[0] + " " + fields[1]
value = fields[1]
else:
# TODO(rameshg87): Check if this ever occurs.
return string.strip(' '), None
return key.strip(' '), value.strip(' ') | python | def _get_key_value(string):
"""Return the (key, value) as a tuple from a string."""
# Normally all properties look like this:
# Unique Identifier: 600508B1001CE4ACF473EE9C826230FF
# Disk Name: /dev/sda
# Mount Points: None
key = ''
value = ''
try:
key, value = string.split(': ')
except ValueError:
# This handles the case when the property of a logical drive
# returned is as follows. Here we cannot split by ':' because
# the disk id has colon in it. So if this is about disk,
# then strip it accordingly.
# Mirror Group 0: physicaldrive 6I:1:5
string = string.lstrip(' ')
if string.startswith('physicaldrive'):
fields = string.split(' ')
# Include fields[1] to key to avoid duplicate pairs
# with the same 'physicaldrive' key
key = fields[0] + " " + fields[1]
value = fields[1]
else:
# TODO(rameshg87): Check if this ever occurs.
return string.strip(' '), None
return key.strip(' '), value.strip(' ') | ['def', '_get_key_value', '(', 'string', ')', ':', '# Normally all properties look like this:', '# Unique Identifier: 600508B1001CE4ACF473EE9C826230FF', '# Disk Name: /dev/sda', '# Mount Points: None', 'key', '=', "''", 'value', '=', "''", 'try', ':', 'key', ',', 'value', '=', 'string', '.', 'split', '(', "': '", ')', 'except', 'ValueError', ':', '# This handles the case when the property of a logical drive', "# returned is as follows. Here we cannot split by ':' because", '# the disk id has colon in it. So if this is about disk,', '# then strip it accordingly.', '# Mirror Group 0: physicaldrive 6I:1:5', 'string', '=', 'string', '.', 'lstrip', '(', "' '", ')', 'if', 'string', '.', 'startswith', '(', "'physicaldrive'", ')', ':', 'fields', '=', 'string', '.', 'split', '(', "' '", ')', '# Include fields[1] to key to avoid duplicate pairs', "# with the same 'physicaldrive' key", 'key', '=', 'fields', '[', '0', ']', '+', '" "', '+', 'fields', '[', '1', ']', 'value', '=', 'fields', '[', '1', ']', 'else', ':', '# TODO(rameshg87): Check if this ever occurs.', 'return', 'string', '.', 'strip', '(', "' '", ')', ',', 'None', 'return', 'key', '.', 'strip', '(', "' '", ')', ',', 'value', '.', 'strip', '(', "' '", ')'] | Return the (key, value) as a tuple from a string. | ['Return', 'the', '(', 'key', 'value', ')', 'as', 'a', 'tuple', 'from', 'a', 'string', '.'] | train | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L35-L62 |
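Both branches can be exercised directly (a sketch, assuming proliantutils is importable; the second line shape is the bare physicaldrive case the comments describe):

from proliantutils.hpssa.objects import _get_key_value

assert _get_key_value('   Disk Name: /dev/sda') == ('Disk Name', '/dev/sda')
assert _get_key_value('   physicaldrive 6I:1:5') == ('physicaldrive 6I:1:5', '6I:1:5')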
9,871 | jbeluch/xbmcswift2 | xbmcswift2/xbmcmixin.py | XBMCMixin.keyboard | def keyboard(self, default=None, heading=None, hidden=False):
'''Displays the keyboard input window to the user. If the user does not
cancel the modal, the value entered by the user will be returned.
:param default: The placeholder text used to prepopulate the input field.
:param heading: The heading for the window. Defaults to the current
addon's name. If you require a blank heading, pass an
empty string.
:param hidden: Whether or not the input field should be masked with
stars, e.g. a password field.
'''
if heading is None:
heading = self.addon.getAddonInfo('name')
if default is None:
default = ''
keyboard = xbmc.Keyboard(default, heading, hidden)
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText() | python | def keyboard(self, default=None, heading=None, hidden=False):
'''Displays the keyboard input window to the user. If the user does not
cancel the modal, the value entered by the user will be returned.
:param default: The placeholder text used to prepopulate the input field.
:param heading: The heading for the window. Defaults to the current
addon's name. If you require a blank heading, pass an
empty string.
:param hidden: Whether or not the input field should be masked with
stars, e.g. a password field.
'''
if heading is None:
heading = self.addon.getAddonInfo('name')
if default is None:
default = ''
keyboard = xbmc.Keyboard(default, heading, hidden)
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText() | ['def', 'keyboard', '(', 'self', ',', 'default', '=', 'None', ',', 'heading', '=', 'None', ',', 'hidden', '=', 'False', ')', ':', 'if', 'heading', 'is', 'None', ':', 'heading', '=', 'self', '.', 'addon', '.', 'getAddonInfo', '(', "'name'", ')', 'if', 'default', 'is', 'None', ':', 'default', '=', "''", 'keyboard', '=', 'xbmc', '.', 'Keyboard', '(', 'default', ',', 'heading', ',', 'hidden', ')', 'keyboard', '.', 'doModal', '(', ')', 'if', 'keyboard', '.', 'isConfirmed', '(', ')', ':', 'return', 'keyboard', '.', 'getText', '(', ')'] | Displays the keyboard input window to the user. If the user does not
cancel the modal, the value entered by the user will be returned.
:param default: The placeholder text used to prepopulate the input field.
:param heading: The heading for the window. Defaults to the current
addon's name. If you require a blank heading, pass an
empty string.
:param hidden: Whether or not the input field should be masked with
stars, e.g. a password field. | ['Displays', 'the', 'keyboard', 'input', 'window', 'to', 'the', 'user', '.', 'If', 'the', 'user', 'does', 'not', 'cancel', 'the', 'modal', 'the', 'value', 'entered', 'by', 'the', 'user', 'will', 'be', 'returned', '.'] | train | https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/xbmcmixin.py#L269-L287 |
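Inside a running Kodi add-on the method is used like this (sketch only; xbmc and xbmcswift2 are available only inside Kodi):

from xbmcswift2 import Plugin

plugin = Plugin()

@plugin.route('/search/')
def search():
    query = plugin.keyboard(heading='Search for a show')
    if query is None:  # user cancelled the dialog
        return []
    return [{'label': query}]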
9,872 | rlisagor/freshen | freshen/core.py | StepsRunner.run_steps_from_string | def run_steps_from_string(self, spec, language_name='en'):
""" Called from within step definitions to run other steps. """
caller = inspect.currentframe().f_back
line = caller.f_lineno - 1
fname = caller.f_code.co_filename
steps = parse_steps(spec, fname, line, load_language(language_name))
for s in steps:
self.run_step(s) | python | def run_steps_from_string(self, spec, language_name='en'):
""" Called from within step definitions to run other steps. """
caller = inspect.currentframe().f_back
line = caller.f_lineno - 1
fname = caller.f_code.co_filename
steps = parse_steps(spec, fname, line, load_language(language_name))
for s in steps:
self.run_step(s) | ['def', 'run_steps_from_string', '(', 'self', ',', 'spec', ',', 'language_name', '=', "'en'", ')', ':', 'caller', '=', 'inspect', '.', 'currentframe', '(', ')', '.', 'f_back', 'line', '=', 'caller', '.', 'f_lineno', '-', '1', 'fname', '=', 'caller', '.', 'f_code', '.', 'co_filename', 'steps', '=', 'parse_steps', '(', 'spec', ',', 'fname', ',', 'line', ',', 'load_language', '(', 'language_name', ')', ')', 'for', 's', 'in', 'steps', ':', 'self', '.', 'run_step', '(', 's', ')'] | Called from within step definitions to run other steps. | ['Called', 'from', 'within', 'step', 'definitions', 'to', 'run', 'other', 'steps', '.'] | train | https://github.com/rlisagor/freshen/blob/5578f7368e8d53b4cf51c589fb192090d3524968/freshen/core.py#L19-L28 |
9,873 | pyviz/holoviews | holoviews/core/spaces.py | DynamicMap.relabel | def relabel(self, label=None, group=None, depth=1):
"""Clone object and apply new group and/or label.
Applies relabeling to children up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object
"""
relabelled = super(DynamicMap, self).relabel(label, group, depth)
if depth > 0:
from ..util import Dynamic
def dynamic_relabel(obj, **dynkwargs):
return obj.relabel(group=group, label=label, depth=depth-1)
dmap = Dynamic(self, streams=self.streams, operation=dynamic_relabel)
dmap.data = relabelled.data
with util.disable_constant(dmap):
dmap.group = relabelled.group
dmap.label = relabelled.label
return dmap
return relabelled | python | def relabel(self, label=None, group=None, depth=1):
"""Clone object and apply new group and/or label.
Applies relabeling to children up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object
"""
relabelled = super(DynamicMap, self).relabel(label, group, depth)
if depth > 0:
from ..util import Dynamic
def dynamic_relabel(obj, **dynkwargs):
return obj.relabel(group=group, label=label, depth=depth-1)
dmap = Dynamic(self, streams=self.streams, operation=dynamic_relabel)
dmap.data = relabelled.data
with util.disable_constant(dmap):
dmap.group = relabelled.group
dmap.label = relabelled.label
return dmap
return relabelled | ['def', 'relabel', '(', 'self', ',', 'label', '=', 'None', ',', 'group', '=', 'None', ',', 'depth', '=', '1', ')', ':', 'relabelled', '=', 'super', '(', 'DynamicMap', ',', 'self', ')', '.', 'relabel', '(', 'label', ',', 'group', ',', 'depth', ')', 'if', 'depth', '>', '0', ':', 'from', '.', '.', 'util', 'import', 'Dynamic', 'def', 'dynamic_relabel', '(', 'obj', ',', '*', '*', 'dynkwargs', ')', ':', 'return', 'obj', '.', 'relabel', '(', 'group', '=', 'group', ',', 'label', '=', 'label', ',', 'depth', '=', 'depth', '-', '1', ')', 'dmap', '=', 'Dynamic', '(', 'self', ',', 'streams', '=', 'self', '.', 'streams', ',', 'operation', '=', 'dynamic_relabel', ')', 'dmap', '.', 'data', '=', 'relabelled', '.', 'data', 'with', 'util', '.', 'disable_constant', '(', 'dmap', ')', ':', 'dmap', '.', 'group', '=', 'relabelled', '.', 'group', 'dmap', '.', 'label', '=', 'relabelled', '.', 'label', 'return', 'dmap', 'return', 'relabelled'] | Clone object and apply new group and/or label.
Applies relabeling to children up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object | ['Clone', 'object', 'and', 'apply', 'new', 'group', 'and', '/', 'or', 'label', '.'] | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/spaces.py#L1435-L1461 |
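For a DynamicMap the relabel is itself wrapped in a Dynamic operation, so frames rendered later pick up the new group and label too; usage matches any other element (sketch, assuming holoviews and numpy are installed):

import numpy as np
import holoviews as hv

dmap = hv.DynamicMap(lambda f: hv.Curve(np.sin(f * np.linspace(0, np.pi, 100))),
                     kdims=['f']).redim.range(f=(1, 5))
relabelled = dmap.relabel(group='Waves', label='Sine')
print(relabelled.group, relabelled.label)  # Waves Sine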
9,874 | bitesofcode/projexui | projexui/widgets/xcalendarwidget/xcalendaritem.py | XCalendarItem.paint | def paint( self, painter, option, widget ):
"""
Paints this item on the painter.
:param painter | <QPainter>
option | <QStyleOptionGraphicsItem>
widget | <QWidget>
"""
if ( self._rebuildRequired ):
self.rebuild()
# set the coloring options
painter.setPen(self.borderColor())
if ( self.isSelected() ):
painter.setBrush(self.highlightColor())
else:
painter.setBrush(self.fillColor())
hints = painter.renderHints()
if ( not self.isAllDay() ):
painter.setRenderHint(painter.Antialiasing)
pen = painter.pen()
pen.setWidthF(0.25)
painter.setPen(pen)
painter.drawPath(self.path())
# draw the text in the different rect locations
title = self.title()
painter.setPen(self.textColor())
for data in self._textData:
painter.drawText(*data)
painter.setRenderHints(hints) | python | def paint( self, painter, option, widget ):
"""
Paints this item on the painter.
:param painter | <QPainter>
option | <QStyleOptionGraphicsItem>
widget | <QWidget>
"""
if ( self._rebuildRequired ):
self.rebuild()
# set the coloring options
painter.setPen(self.borderColor())
if ( self.isSelected() ):
painter.setBrush(self.highlightColor())
else:
painter.setBrush(self.fillColor())
hints = painter.renderHints()
if ( not self.isAllDay() ):
painter.setRenderHint(painter.Antialiasing)
pen = painter.pen()
pen.setWidthF(0.25)
painter.setPen(pen)
painter.drawPath(self.path())
# draw the text in the different rect locations
title = self.title()
painter.setPen(self.textColor())
for data in self._textData:
painter.drawText(*data)
painter.setRenderHints(hints) | ['def', 'paint', '(', 'self', ',', 'painter', ',', 'option', ',', 'widget', ')', ':', 'if', '(', 'self', '.', '_rebuildRequired', ')', ':', 'self', '.', 'rebuild', '(', ')', '# set the coloring options\r', 'painter', '.', 'setPen', '(', 'self', '.', 'borderColor', '(', ')', ')', 'if', '(', 'self', '.', 'isSelected', '(', ')', ')', ':', 'painter', '.', 'setBrush', '(', 'self', '.', 'highlightColor', '(', ')', ')', 'else', ':', 'painter', '.', 'setBrush', '(', 'self', '.', 'fillColor', '(', ')', ')', 'hints', '=', 'painter', '.', 'renderHints', '(', ')', 'if', '(', 'not', 'self', '.', 'isAllDay', '(', ')', ')', ':', 'painter', '.', 'setRenderHint', '(', 'painter', '.', 'Antialiasing', ')', 'pen', '=', 'painter', '.', 'pen', '(', ')', 'pen', '.', 'setWidthF', '(', '0.25', ')', 'painter', '.', 'setPen', '(', 'pen', ')', 'painter', '.', 'drawPath', '(', 'self', '.', 'path', '(', ')', ')', '# draw the text in the different rect locations\r', 'title', '=', 'self', '.', 'title', '(', ')', 'painter', '.', 'setPen', '(', 'self', '.', 'textColor', '(', ')', ')', 'for', 'data', 'in', 'self', '.', '_textData', ':', 'painter', '.', 'drawText', '(', '*', 'data', ')', 'painter', '.', 'setRenderHints', '(', 'hints', ')'] | Paints this item on the painter.
:param painter | <QPainter>
option | <QStyleOptionGraphicsItem>
widget | <QWidget> | ['Paints', 'this', 'item', 'on', 'the', 'painter', '.', ':', 'param', 'painter', '|', '<QPainter', '>', 'option', '|', '<QStyleOptionGraphicsItem', '>', 'widget', '|', '<QWidget', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendaritem.py#L204-L238 |
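The same pen/brush-then-geometry-then-text ordering applies to any QGraphicsItem override; a minimal standalone version (PyQt5 sketch, independent of projexui's PyQt4-era code):

from PyQt5.QtCore import QRectF
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QGraphicsItem

class CalendarBlock(QGraphicsItem):
    def boundingRect(self):
        return QRectF(0, 0, 120, 40)

    def paint(self, painter, option, widget):
        # Colours first, then the shape, then the text on top.
        painter.setPen(QColor('#444444'))
        painter.setBrush(QColor('#a0c4ff') if self.isSelected()
                         else QColor('#d0e2ff'))
        painter.drawRect(self.boundingRect())
        painter.drawText(self.boundingRect().adjusted(4, 4, -4, -4), 0, 'Meeting')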
9,875 | MonashBI/arcana | arcana/repository/tree.py | Tree.find_mismatch | def find_mismatch(self, other, indent=''):
"""
Used in debugging unittests
"""
mismatch = super(Tree, self).find_mismatch(other, indent)
sub_indent = indent + ' '
if len(list(self.subjects)) != len(list(other.subjects)):
mismatch += ('\n{indent}mismatching subject lengths '
'(self={} vs other={}): '
'\n{indent} self={}\n{indent} other={}'
.format(len(list(self.subjects)),
len(list(other.subjects)),
list(self.subjects),
list(other.subjects),
indent=sub_indent))
else:
for s, o in zip(self.subjects, other.subjects):
mismatch += s.find_mismatch(o, indent=sub_indent)
if len(list(self.visits)) != len(list(other.visits)):
mismatch += ('\n{indent}mismatching visit lengths '
'(self={} vs other={}): '
'\n{indent} self={}\n{indent} other={}'
.format(len(list(self.visits)),
len(list(other.visits)),
list(self.visits),
list(other.visits),
indent=sub_indent))
else:
for s, o in zip(self.visits, other.visits):
mismatch += s.find_mismatch(o, indent=sub_indent)
return mismatch | python | def find_mismatch(self, other, indent=''):
"""
Used in debugging unittests
"""
mismatch = super(Tree, self).find_mismatch(other, indent)
sub_indent = indent + ' '
if len(list(self.subjects)) != len(list(other.subjects)):
mismatch += ('\n{indent}mismatching subject lengths '
'(self={} vs other={}): '
'\n{indent} self={}\n{indent} other={}'
.format(len(list(self.subjects)),
len(list(other.subjects)),
list(self.subjects),
list(other.subjects),
indent=sub_indent))
else:
for s, o in zip(self.subjects, other.subjects):
mismatch += s.find_mismatch(o, indent=sub_indent)
if len(list(self.visits)) != len(list(other.visits)):
mismatch += ('\n{indent}mismatching visit lengths '
'(self={} vs other={}): '
'\n{indent} self={}\n{indent} other={}'
.format(len(list(self.visits)),
len(list(other.visits)),
list(self.visits),
list(other.visits),
indent=sub_indent))
else:
for s, o in zip(self.visits, other.visits):
mismatch += s.find_mismatch(o, indent=sub_indent)
return mismatch | ['def', 'find_mismatch', '(', 'self', ',', 'other', ',', 'indent', '=', "''", ')', ':', 'mismatch', '=', 'super', '(', 'Tree', ',', 'self', ')', '.', 'find_mismatch', '(', 'other', ',', 'indent', ')', 'sub_indent', '=', 'indent', '+', "' '", 'if', 'len', '(', 'list', '(', 'self', '.', 'subjects', ')', ')', '!=', 'len', '(', 'list', '(', 'other', '.', 'subjects', ')', ')', ':', 'mismatch', '+=', '(', "'\\n{indent}mismatching subject lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", '.', 'format', '(', 'len', '(', 'list', '(', 'self', '.', 'subjects', ')', ')', ',', 'len', '(', 'list', '(', 'other', '.', 'subjects', ')', ')', ',', 'list', '(', 'self', '.', 'subjects', ')', ',', 'list', '(', 'other', '.', 'subjects', ')', ',', 'indent', '=', 'sub_indent', ')', ')', 'else', ':', 'for', 's', ',', 'o', 'in', 'zip', '(', 'self', '.', 'subjects', ',', 'other', '.', 'subjects', ')', ':', 'mismatch', '+=', 's', '.', 'find_mismatch', '(', 'o', ',', 'indent', '=', 'sub_indent', ')', 'if', 'len', '(', 'list', '(', 'self', '.', 'visits', ')', ')', '!=', 'len', '(', 'list', '(', 'other', '.', 'visits', ')', ')', ':', 'mismatch', '+=', '(', "'\\n{indent}mismatching visit lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", '.', 'format', '(', 'len', '(', 'list', '(', 'self', '.', 'visits', ')', ')', ',', 'len', '(', 'list', '(', 'other', '.', 'visits', ')', ')', ',', 'list', '(', 'self', '.', 'visits', ')', ',', 'list', '(', 'other', '.', 'visits', ')', ',', 'indent', '=', 'sub_indent', ')', ')', 'else', ':', 'for', 's', ',', 'o', 'in', 'zip', '(', 'self', '.', 'visits', ',', 'other', '.', 'visits', ')', ':', 'mismatch', '+=', 's', '.', 'find_mismatch', '(', 'o', ',', 'indent', '=', 'sub_indent', ')', 'return', 'mismatch'] | Used in debugging unittests | ['Used', 'in', 'debugging', 'unittests'] | train | https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L524-L554 |
9,876 | nicolargo/glances | glances/plugins/glances_fs.py | Plugin.update | def update(self):
"""Update the FS stats using the input method."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
# Update stats using the standard system lib
# Grab the stats using the psutil disk_partitions
# If 'all'=False return physical devices only (e.g. hard disks, cd-rom drives, USB keys)
# and ignore all others (e.g. memory partitions such as /dev/shm)
try:
fs_stat = psutil.disk_partitions(all=False)
except UnicodeDecodeError:
return self.stats
            # Optional hack to allow logical mount points (issue #448)
# Ex: Had to put 'allow=zfs' in the [fs] section of the conf file
# to allow zfs monitoring
for fstype in self.get_conf_value('allow'):
try:
fs_stat += [f for f in psutil.disk_partitions(all=True) if f.fstype.find(fstype) >= 0]
except UnicodeDecodeError:
return self.stats
# Loop over fs
for fs in fs_stat:
# Do not take hidden file system into account
if self.is_hide(fs.mountpoint):
continue
# Grab the disk usage
try:
fs_usage = psutil.disk_usage(fs.mountpoint)
except OSError:
# Correct issue #346
# Disk is ejected during the command
continue
fs_current = {
'device_name': fs.device,
'fs_type': fs.fstype,
# Manage non breaking space (see issue #1065)
'mnt_point': u(fs.mountpoint).replace(u'\u00A0', ' '),
'size': fs_usage.total,
'used': fs_usage.used,
'free': fs_usage.free,
'percent': fs_usage.percent,
'key': self.get_key()}
stats.append(fs_current)
elif self.input_method == 'snmp':
# Update stats using SNMP
# SNMP bulk command to get all file system in one shot
try:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
bulk=True)
except KeyError:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid['default'],
bulk=True)
# Loop over fs
if self.short_system_name in ('windows', 'esxi'):
# Windows or ESXi tips
for fs in fs_stat:
# Memory stats are grabbed in the same OID table (ignore it)
if fs == 'Virtual Memory' or fs == 'Physical Memory' or fs == 'Real Memory':
continue
size = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
used = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
percent = float(used * 100 / size)
fs_current = {
'device_name': '',
'mnt_point': fs.partition(' ')[0],
'size': size,
'used': used,
'percent': percent,
'key': self.get_key()}
stats.append(fs_current)
else:
# Default behavior
for fs in fs_stat:
fs_current = {
'device_name': fs_stat[fs]['device_name'],
'mnt_point': fs,
'size': int(fs_stat[fs]['size']) * 1024,
'used': int(fs_stat[fs]['used']) * 1024,
'percent': float(fs_stat[fs]['percent']),
'key': self.get_key()}
stats.append(fs_current)
# Update the stats
self.stats = stats
return self.stats | python | def update(self):
"""Update the FS stats using the input method."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
# Update stats using the standard system lib
# Grab the stats using the psutil disk_partitions
# If 'all'=False return physical devices only (e.g. hard disks, cd-rom drives, USB keys)
# and ignore all others (e.g. memory partitions such as /dev/shm)
try:
fs_stat = psutil.disk_partitions(all=False)
except UnicodeDecodeError:
return self.stats
            # Optional hack to allow logical mount points (issue #448)
# Ex: Had to put 'allow=zfs' in the [fs] section of the conf file
# to allow zfs monitoring
for fstype in self.get_conf_value('allow'):
try:
fs_stat += [f for f in psutil.disk_partitions(all=True) if f.fstype.find(fstype) >= 0]
except UnicodeDecodeError:
return self.stats
# Loop over fs
for fs in fs_stat:
# Do not take hidden file system into account
if self.is_hide(fs.mountpoint):
continue
# Grab the disk usage
try:
fs_usage = psutil.disk_usage(fs.mountpoint)
except OSError:
# Correct issue #346
# Disk is ejected during the command
continue
fs_current = {
'device_name': fs.device,
'fs_type': fs.fstype,
# Manage non breaking space (see issue #1065)
'mnt_point': u(fs.mountpoint).replace(u'\u00A0', ' '),
'size': fs_usage.total,
'used': fs_usage.used,
'free': fs_usage.free,
'percent': fs_usage.percent,
'key': self.get_key()}
stats.append(fs_current)
elif self.input_method == 'snmp':
# Update stats using SNMP
# SNMP bulk command to get all file system in one shot
try:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
bulk=True)
except KeyError:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid['default'],
bulk=True)
# Loop over fs
if self.short_system_name in ('windows', 'esxi'):
# Windows or ESXi tips
for fs in fs_stat:
# Memory stats are grabbed in the same OID table (ignore it)
if fs == 'Virtual Memory' or fs == 'Physical Memory' or fs == 'Real Memory':
continue
size = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
used = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
percent = float(used * 100 / size)
fs_current = {
'device_name': '',
'mnt_point': fs.partition(' ')[0],
'size': size,
'used': used,
'percent': percent,
'key': self.get_key()}
stats.append(fs_current)
else:
# Default behavior
for fs in fs_stat:
fs_current = {
'device_name': fs_stat[fs]['device_name'],
'mnt_point': fs,
'size': int(fs_stat[fs]['size']) * 1024,
'used': int(fs_stat[fs]['used']) * 1024,
'percent': float(fs_stat[fs]['percent']),
'key': self.get_key()}
stats.append(fs_current)
# Update the stats
self.stats = stats
        return self.stats | ['def', 'update', '(', 'self', ')', ':', '# Init new stats', 'stats', '=', 'self', '.', 'get_init_value', '(', ')', 'if', 'self', '.', 'input_method', '==', "'local'", ':', '# Update stats using the standard system lib', '# Grab the stats using the psutil disk_partitions', "# If 'all'=False return physical devices only (e.g. hard disks, cd-rom drives, USB keys)", '# and ignore all others (e.g. memory partitions such as /dev/shm)', 'try', ':', 'fs_stat', '=', 'psutil', '.', 'disk_partitions', '(', 'all', '=', 'False', ')', 'except', 'UnicodeDecodeError', ':', 'return', 'self', '.', 'stats', '# Optional hack to allow logical mount points (issue #448)', "# Ex: Had to put 'allow=zfs' in the [fs] section of the conf file", '# to allow zfs monitoring', 'for', 'fstype', 'in', 'self', '.', 'get_conf_value', '(', "'allow'", ')', ':', 'try', ':', 'fs_stat', '+=', '[', 'f', 'for', 'f', 'in', 'psutil', '.', 'disk_partitions', '(', 'all', '=', 'True', ')', 'if', 'f', '.', 'fstype', '.', 'find', '(', 'fstype', ')', '>=', '0', ']', 'except', 'UnicodeDecodeError', ':', 'return', 'self', '.', 'stats', '# Loop over fs', 'for', 'fs', 'in', 'fs_stat', ':', '# Do not take hidden file system into account', 'if', 'self', '.', 'is_hide', '(', 'fs', '.', 'mountpoint', ')', ':', 'continue', '# Grab the disk usage', 'try', ':', 'fs_usage', '=', 'psutil', '.', 'disk_usage', '(', 'fs', '.', 'mountpoint', ')', 'except', 'OSError', ':', '# Correct issue #346', '# Disk is ejected during the command', 'continue', 'fs_current', '=', '{', "'device_name'", ':', 'fs', '.', 'device', ',', "'fs_type'", ':', 'fs', '.', 'fstype', ',', '# Manage non breaking space (see issue #1065)', "'mnt_point'", ':', 'u', '(', 'fs', '.', 'mountpoint', ')', '.', 'replace', '(', "u'\\u00A0'", ',', "' '", ')', ',', "'size'", ':', 'fs_usage', '.', 'total', ',', "'used'", ':', 'fs_usage', '.', 'used', ',', "'free'", ':', 'fs_usage', '.', 'free', ',', "'percent'", ':', 'fs_usage', '.', 'percent', ',', "'key'", ':', 'self', '.', 'get_key', '(', ')', '}', 'stats', '.', 'append', '(', 'fs_current', ')', 'elif', 'self', '.', 'input_method', '==', "'snmp'", ':', '# Update stats using SNMP', '# SNMP bulk command to get all file system in one shot', 'try', ':', 'fs_stat', '=', 'self', '.', 'get_stats_snmp', '(', 'snmp_oid', '=', 'snmp_oid', '[', 'self', '.', 'short_system_name', ']', ',', 'bulk', '=', 'True', ')', 'except', 'KeyError', ':', 'fs_stat', '=', 'self', '.', 'get_stats_snmp', '(', 'snmp_oid', '=', 'snmp_oid', '[', "'default'", ']', ',', 'bulk', '=', 'True', ')', '# Loop over fs', 'if', 'self', '.', 'short_system_name', 'in', '(', "'windows'", ',', "'esxi'", ')', ':', '# Windows or ESXi tips', 'for', 'fs', 'in', 'fs_stat', ':', '# Memory stats are grabbed in the same OID table (ignore it)', 'if', 'fs', '==', "'Virtual Memory'", 'or', 'fs', '==', "'Physical Memory'", 'or', 'fs', '==', "'Real Memory'", ':', 'continue', 'size', '=', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'size'", ']', ')', '*', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'alloc_unit'", ']', ')', 'used', '=', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'used'", ']', ')', '*', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'alloc_unit'", ']', ')', 'percent', '=', 'float', '(', 'used', '*', '100', '/', 'size', ')', 'fs_current', '=', '{', "'device_name'", ':', "''", ',', "'mnt_point'", ':', 'fs', '.', 'partition', '(', "' '", ')', '[', '0', ']', ',', "'size'", ':', 'size', ',', "'used'", ':', 'used', ',', "'percent'", ':', 'percent', ',', "'key'", ':', 'self', '.', 'get_key', '(', ')', '}', 'stats', '.', 'append', '(', 'fs_current', ')', 'else', ':', '# Default behavior', 'for', 'fs', 'in', 'fs_stat', ':', 'fs_current', '=', '{', "'device_name'", ':', 'fs_stat', '[', 'fs', ']', '[', "'device_name'", ']', ',', "'mnt_point'", ':', 'fs', ',', "'size'", ':', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'size'", ']', ')', '*', '1024', ',', "'used'", ':', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'used'", ']', ')', '*', '1024', ',', "'percent'", ':', 'float', '(', 'fs_stat', '[', 'fs', ']', '[', "'percent'", ']', ')', ',', "'key'", ':', 'self', '.', 'get_key', '(', ')', '}', 'stats', '.', 'append', '(', 'fs_current', ')', '# Update the stats', 'self', '.', 'stats', '=', 'stats', 'return', 'self', '.', 'stats'] | Update the FS stats using the input method. | ['Update', 'the', 'FS', 'stats', 'using', 'the', 'input', 'method', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_fs.py#L88-L181 |
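The 'local' branch is a thin wrapper over psutil; its core loop can be reproduced standalone (sketch; glances layers the hide rules, the allow-list and the SNMP path on top):

import psutil

stats = []
for part in psutil.disk_partitions(all=False):
    try:
        usage = psutil.disk_usage(part.mountpoint)
    except OSError:
        continue  # e.g. a disk ejected mid-scan
    stats.append({'device_name': part.device,
                  'fs_type': part.fstype,
                  'mnt_point': part.mountpoint,
                  'size': usage.total,
                  'used': usage.used,
                  'free': usage.free,
                  'percent': usage.percent})

for fs in stats:
    print(fs['mnt_point'], fs['percent'], '%')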
9,877 | python-odin/odinweb | odinweb/signing.py | verify_url | def verify_url(url, secret_key, **kwargs):
"""
Verify a signed URL (excluding the domain and scheme).
:param url: URL to verify
:param secret_key: Secret key
:rtype: bool
:raises: URLError
"""
result = urlparse(url)
query_args = MultiValueDict(parse_qs(result.query))
return verify_url_path(result.path, query_args, secret_key, **kwargs) | python | def verify_url(url, secret_key, **kwargs):
"""
Verify a signed URL (excluding the domain and scheme).
:param url: URL to verify
:param secret_key: Secret key
:rtype: bool
:raises: URLError
"""
result = urlparse(url)
query_args = MultiValueDict(parse_qs(result.query))
return verify_url_path(result.path, query_args, secret_key, **kwargs) | ['def', 'verify_url', '(', 'url', ',', 'secret_key', ',', '*', '*', 'kwargs', ')', ':', 'result', '=', 'urlparse', '(', 'url', ')', 'query_args', '=', 'MultiValueDict', '(', 'parse_qs', '(', 'result', '.', 'query', ')', ')', 'return', 'verify_url_path', '(', 'result', '.', 'path', ',', 'query_args', ',', 'secret_key', ',', '*', '*', 'kwargs', ')'] | Verify a signed URL (excluding the domain and scheme).
:param url: URL to verify
:param secret_key: Secret key
:rtype: bool
:raises: URLError | ['Verify', 'a', 'signed', 'URL', '(', 'excluding', 'the', 'domain', 'and', 'scheme', ')', '.'] | train | https://github.com/python-odin/odinweb/blob/198424133584acc18cb41c8d18d91f803abc810f/odinweb/signing.py#L118-L130 |
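A minimal usage sketch for verify_url. The sample path, the query-parameter names carrying the signature, and the import location of URLError are assumptions for illustration; only verify_url itself is shown above.

# Sketch only: the query-parameter names and the URLError import path are assumptions.
from odinweb.signing import verify_url, URLError

signed = "/api/v1/report?expires=1514764800&signature=deadbeef"
try:
    is_valid = verify_url(signed, "my-secret-key")
except URLError:
    is_valid = False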
9,878 | simon-anders/htseq | python2/HTSeq/__init__.py | parse_GFF_attribute_string | def parse_GFF_attribute_string(attrStr, extra_return_first_value=False):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID.
"""
if attrStr.endswith("\n"):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip(
itertools.count(),
_HTSeq.quotesafe_split(attrStr)):
if _re_attr_empty.match(attr):
continue
if attr.count('"') not in (0, 2):
raise ValueError(
"The attribute string seems to contain mismatched quotes.")
mo = _re_attr_main.match(attr)
if not mo:
raise ValueError("Failure parsing GFF attribute line")
val = mo.group(2)
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
d[intern(mo.group(1))] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return (d, first_val)
else:
return d | python | def parse_GFF_attribute_string(attrStr, extra_return_first_value=False):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID.
"""
if attrStr.endswith("\n"):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip(
itertools.count(),
_HTSeq.quotesafe_split(attrStr)):
if _re_attr_empty.match(attr):
continue
if attr.count('"') not in (0, 2):
raise ValueError(
"The attribute string seems to contain mismatched quotes.")
mo = _re_attr_main.match(attr)
if not mo:
raise ValueError("Failure parsing GFF attribute line")
val = mo.group(2)
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
d[intern(mo.group(1))] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return (d, first_val)
else:
return d | ['def', 'parse_GFF_attribute_string', '(', 'attrStr', ',', 'extra_return_first_value', '=', 'False', ')', ':', 'if', 'attrStr', '.', 'endswith', '(', '"\\n"', ')', ':', 'attrStr', '=', 'attrStr', '[', ':', '-', '1', ']', 'd', '=', '{', '}', 'first_val', '=', '"_unnamed_"', 'for', '(', 'i', ',', 'attr', ')', 'in', 'itertools', '.', 'izip', '(', 'itertools', '.', 'count', '(', ')', ',', '_HTSeq', '.', 'quotesafe_split', '(', 'attrStr', ')', ')', ':', 'if', '_re_attr_empty', '.', 'match', '(', 'attr', ')', ':', 'continue', 'if', 'attr', '.', 'count', '(', '\'"\'', ')', 'not', 'in', '(', '0', ',', '2', ')', ':', 'raise', 'ValueError', '(', '"The attribute string seems to contain mismatched quotes."', ')', 'mo', '=', '_re_attr_main', '.', 'match', '(', 'attr', ')', 'if', 'not', 'mo', ':', 'raise', 'ValueError', '(', '"Failure parsing GFF attribute line"', ')', 'val', '=', 'mo', '.', 'group', '(', '2', ')', 'if', 'val', '.', 'startswith', '(', '\'"\'', ')', 'and', 'val', '.', 'endswith', '(', '\'"\'', ')', ':', 'val', '=', 'val', '[', '1', ':', '-', '1', ']', 'd', '[', 'intern', '(', 'mo', '.', 'group', '(', '1', ')', ')', ']', '=', 'intern', '(', 'val', ')', 'if', 'extra_return_first_value', 'and', 'i', '==', '0', ':', 'first_val', '=', 'val', 'if', 'extra_return_first_value', ':', 'return', '(', 'd', ',', 'first_val', ')', 'else', ':', 'return', 'd'] | Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID. | ['Parses', 'a', 'GFF', 'attribute', 'string', 'and', 'returns', 'it', 'as', 'a', 'dictionary', '.'] | train | https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python2/HTSeq/__init__.py#L144-L175 |
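A quick illustration of the parser on a GTF-style attribute string (this is the Python 2 build, hence itertools.izip and intern above). The expected results assume the usual key/value pattern for the unshown _re_attr_main regex:

attr = 'gene_id "ENSG00000223972"; gene_name "DDX11L1"'
d = parse_GFF_attribute_string(attr)
# d == {'gene_id': 'ENSG00000223972', 'gene_name': 'DDX11L1'}
d, first = parse_GFF_attribute_string(attr, extra_return_first_value=True)
# first == 'ENSG00000223972', useful when the first attribute is the ID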
9,879 | Alignak-monitoring/alignak | alignak/objects/satellitelink.py | SatelliteLink.push_external_commands | def push_external_commands(self, commands):
"""Send a HTTP request to the satellite (POST /r_un_external_commands)
to send the external commands to the satellite
:param commands: list of external commands to send
:type commands: list
:return: True on success, False on failure
:rtype: bool
"""
logger.debug("Pushing %d external commands", len(commands))
return self.con.post('_run_external_commands', {'cmds': commands}, wait=True) | python | def push_external_commands(self, commands):
"""Send a HTTP request to the satellite (POST /r_un_external_commands)
to send the external commands to the satellite
:param commands: list of external commands to send
:type commands: list
:return: True on success, False on failure
:rtype: bool
"""
logger.debug("Pushing %d external commands", len(commands))
return self.con.post('_run_external_commands', {'cmds': commands}, wait=True) | ['def', 'push_external_commands', '(', 'self', ',', 'commands', ')', ':', 'logger', '.', 'debug', '(', '"Pushing %d external commands"', ',', 'len', '(', 'commands', ')', ')', 'return', 'self', '.', 'con', '.', 'post', '(', "'_run_external_commands'", ',', '{', "'cmds'", ':', 'commands', '}', ',', 'wait', '=', 'True', ')'] | Send an HTTP request to the satellite (POST /_run_external_commands)
to send the external commands to the satellite
:param commands: list of external commands to send
:type commands: list
:return: True on success, False on failure
:rtype: bool | ['Send', 'an', 'HTTP', 'request', 'to', 'the', 'satellite', '(', 'POST', '/', '_run_external_commands', ')', 'to', 'send', 'the', 'external', 'commands', 'to', 'the', 'satellite'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L910-L920 |
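A hedged caller-side sketch; `link` is assumed to be an already-connected SatelliteLink, and the command string follows the standard monitoring external-command syntax:

# `link` is assumed to be a connected SatelliteLink instance.
commands = ['[1508507862] PROCESS_HOST_CHECK_RESULT;srv01;0;Host is UP']
if link.push_external_commands(commands):
    print('commands accepted by the satellite')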
9,880 | inveniosoftware/invenio-files-rest | invenio_files_rest/models.py | Part.get_or_none | def get_or_none(cls, mp, part_number):
"""Get part number."""
return cls.query.filter_by(
upload_id=mp.upload_id,
part_number=part_number
).one_or_none() | python | def get_or_none(cls, mp, part_number):
"""Get part number."""
return cls.query.filter_by(
upload_id=mp.upload_id,
part_number=part_number
).one_or_none() | ['def', 'get_or_none', '(', 'cls', ',', 'mp', ',', 'part_number', ')', ':', 'return', 'cls', '.', 'query', '.', 'filter_by', '(', 'upload_id', '=', 'mp', '.', 'upload_id', ',', 'part_number', '=', 'part_number', ')', '.', 'one_or_none', '(', ')'] | Get a part by part number, or None if it does not exist. | ['Get', 'a', 'part', 'by', 'part', 'number', ',', 'or', 'None', 'if', 'it', 'does', 'not', 'exist', '.'] | train | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L1640-L1645 |
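Typical lookup, assuming `mp` is an existing multipart-upload record (the model it comes from is not shown here):

# `mp` is assumed to be a previously fetched multipart upload object.
part = Part.get_or_none(mp, part_number=3)
if part is None:
    print('part 3 has not been uploaded yet')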
9,881 | quantmind/pulsar | examples/philosophers/manage.py | DiningPhilosophers.release_forks | async def release_forks(self, philosopher):
'''The ``philosopher`` has just eaten and is ready to release both
forks.
This method releases them, one by one, by sending the ``putdown_fork``
action to the monitor.
'''
forks = self.forks
self.forks = []
self.started_waiting = 0
for fork in forks:
philosopher.logger.debug('Putting down fork %s', fork)
await philosopher.send('monitor', 'putdown_fork', fork)
await sleep(self.cfg.waiting_period) | python | async def release_forks(self, philosopher):
'''The ``philosopher`` has just eaten and is ready to release both
forks.
This method releases them, one by one, by sending the ``putdown_fork``
action to the monitor.
'''
forks = self.forks
self.forks = []
self.started_waiting = 0
for fork in forks:
philosopher.logger.debug('Putting down fork %s', fork)
await philosopher.send('monitor', 'putdown_fork', fork)
await sleep(self.cfg.waiting_period) | ['async', 'def', 'release_forks', '(', 'self', ',', 'philosopher', ')', ':', 'forks', '=', 'self', '.', 'forks', 'self', '.', 'forks', '=', '[', ']', 'self', '.', 'started_waiting', '=', '0', 'for', 'fork', 'in', 'forks', ':', 'philosopher', '.', 'logger', '.', 'debug', '(', "'Putting down fork %s'", ',', 'fork', ')', 'await', 'philosopher', '.', 'send', '(', "'monitor'", ',', "'putdown_fork'", ',', 'fork', ')', 'await', 'sleep', '(', 'self', '.', 'cfg', '.', 'waiting_period', ')'] | The ``philosopher`` has just eaten and is ready to release both
forks.
This method releases them, one by one, by sending the ``putdown_fork``
action to the monitor. | ['The', 'philosopher', 'has', 'just', 'eaten', 'and', 'is', 'ready', 'to', 'release', 'both', 'forks', '.'] | train | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/examples/philosophers/manage.py#L177-L190 |
9,882 | boriel/zxbasic | zxbparser.py | p_expr_BXOR_expr | def p_expr_BXOR_expr(p):
""" expr : expr BXOR expr
"""
p[0] = make_binary(p.lineno(2), 'BXOR', p[1], p[3], lambda x, y: x ^ y) | python | def p_expr_BXOR_expr(p):
""" expr : expr BXOR expr
"""
p[0] = make_binary(p.lineno(2), 'BXOR', p[1], p[3], lambda x, y: x ^ y) | ['def', 'p_expr_BXOR_expr', '(', 'p', ')', ':', 'p', '[', '0', ']', '=', 'make_binary', '(', 'p', '.', 'lineno', '(', '2', ')', ',', "'BXOR'", ',', 'p', '[', '1', ']', ',', 'p', '[', '3', ']', ',', 'lambda', 'x', ',', 'y', ':', 'x', '^', 'y', ')'] | expr : expr BXOR expr | ['expr', ':', 'expr', 'BXOR', 'expr'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L2331-L2334 |
9,883 | Metatab/metapack | metapack/rowgenerator.py | copy_reference | def copy_reference(resource, doc, env, *args, **kwargs):
"""A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency.
The function will generate rows from a reference that has the same name as the resource term.
"""
yield from doc.reference(resource.name) | python | def copy_reference(resource, doc, env, *args, **kwargs):
"""A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency.
The function will generate rows from a reference that has the same name as the resource term.
"""
yield from doc.reference(resource.name) | ['def', 'copy_reference', '(', 'resource', ',', 'doc', ',', 'env', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'yield', 'from', 'doc', '.', 'reference', '(', 'resource', '.', 'name', ')'] | A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency.
The function will generate rows from a reference that has the same name as the resource term. | ['A', 'row', '-', 'generating', 'function', 'that', 'yields', 'from', 'a', 'reference', '.', 'This', 'permits', 'an', 'upstream', 'package', 'to', 'be', 'copied', 'and', 'modified', 'by', 'this', 'package', 'while', 'being', 'formally', 'referenced', 'as', 'a', 'dependency'] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/rowgenerator.py#L164-L171 |
9,884 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.pin_rm | def pin_rm(self, path, *paths, **kwargs):
"""Removes a pinned object from local storage.
Removes the pin from the given object allowing it to be garbage
collected if needed.
.. code-block:: python
>>> c.pin_rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d')
{'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}
Parameters
----------
path : str
Path to object(s) to be unpinned
recursive : bool
Recursively unpin the object linked to by the specified object(s)
Returns
-------
dict : List of IPFS objects that have been unpinned
"""
#PY2: No support for kw-only parameters after glob parameters
if "recursive" in kwargs:
kwargs.setdefault("opts", {"recursive": kwargs["recursive"]})
del kwargs["recursive"]
args = (path,) + paths
return self._client.request('/pin/rm', args, decoder='json', **kwargs) | python | def pin_rm(self, path, *paths, **kwargs):
"""Removes a pinned object from local storage.
Removes the pin from the given object allowing it to be garbage
collected if needed.
.. code-block:: python
>>> c.pin_rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d')
{'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}
Parameters
----------
path : str
Path to object(s) to be unpinned
recursive : bool
Recursively unpin the object linked to by the specified object(s)
Returns
-------
dict : List of IPFS objects that have been unpinned
"""
#PY2: No support for kw-only parameters after glob parameters
if "recursive" in kwargs:
kwargs.setdefault("opts", {"recursive": kwargs["recursive"]})
del kwargs["recursive"]
args = (path,) + paths
return self._client.request('/pin/rm', args, decoder='json', **kwargs) | ['def', 'pin_rm', '(', 'self', ',', 'path', ',', '*', 'paths', ',', '*', '*', 'kwargs', ')', ':', '#PY2: No support for kw-only parameters after glob parameters', 'if', '"recursive"', 'in', 'kwargs', ':', 'kwargs', '.', 'setdefault', '(', '"opts"', ',', '{', '"recursive"', ':', 'kwargs', '[', '"recursive"', ']', '}', ')', 'del', 'kwargs', '[', '"recursive"', ']', 'args', '=', '(', 'path', ',', ')', '+', 'paths', 'return', 'self', '.', '_client', '.', 'request', '(', "'/pin/rm'", ',', 'args', ',', 'decoder', '=', "'json'", ',', '*', '*', 'kwargs', ')'] | Removes a pinned object from local storage.
Removes the pin from the given object allowing it to be garbage
collected if needed.
.. code-block:: python
>>> c.pin_rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d')
{'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}
Parameters
----------
path : str
Path to object(s) to be unpinned
recursive : bool
Recursively unpin the object linked to by the specified object(s)
Returns
-------
dict : List of IPFS objects that have been unpinned | ['Removes', 'a', 'pinned', 'object', 'from', 'local', 'storage', '.'] | train | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L1055-L1083 |
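A small addition to the docstring example: because of the kwargs shim above, `recursive` can be passed as a plain keyword (the CID is reused from the docstring example):

# Unpin only the object itself, leaving objects it links to pinned.
c.pin_rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d', recursive=False)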
9,885 | kevinconway/iface | iface/decorators.py | property | def property(func):
"""Wrap a function as a property.
This differs from attribute by identifying properties explicitly listed
in the class definition rather than named attributes defined on instances
of a class at init time.
"""
attr = abc.abstractmethod(func)
attr.__iproperty__ = True
attr = Property(attr)
return attr | python | def property(func):
"""Wrap a function as a property.
This differs from attribute by identifying properties explicitly listed
in the class definition rather than named attributes defined on instances
of a class at init time.
"""
attr = abc.abstractmethod(func)
attr.__iproperty__ = True
attr = Property(attr)
return attr | ['def', 'property', '(', 'func', ')', ':', 'attr', '=', 'abc', '.', 'abstractmethod', '(', 'func', ')', 'attr', '.', '__iproperty__', '=', 'True', 'attr', '=', 'Property', '(', 'attr', ')', 'return', 'attr'] | Wrap a function as a property.
This differs from attribute by identifying properties explicitly listed
in the class definition rather than named attributes defined on instances
of a class at init time. | ['Wrap', 'a', 'function', 'as', 'a', 'property', '.'] | train | https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/decorators.py#L26-L36 |
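A sketch of how the decorator is meant to be used when declaring an interface; the ABCMeta wiring is an assumption inferred from the abc.abstractmethod call above, not an API documented in this snippet:

import abc
# Alias the decorator to avoid shadowing the built-in property.
from iface.decorators import property as iproperty

class Shape(metaclass=abc.ABCMeta):  # interface base machinery is an assumption

    @iproperty
    def area(self):
        """Surface area of the shape."""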
9,886 | pandas-dev/pandas | pandas/tseries/holiday.py | weekend_to_monday | def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt | python | def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt | ['def', 'weekend_to_monday', '(', 'dt', ')', ':', 'if', 'dt', '.', 'weekday', '(', ')', '==', '6', ':', 'return', 'dt', '+', 'timedelta', '(', '1', ')', 'elif', 'dt', '.', 'weekday', '(', ')', '==', '5', ':', 'return', 'dt', '+', 'timedelta', '(', '2', ')', 'return', 'dt'] | If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe | ['If', 'holiday', 'falls', 'on', 'Sunday', 'or', 'Saturday', 'use', 'day', 'thereafter', '(', 'Monday', ')', 'instead', '.', 'Needed', 'for', 'holidays', 'such', 'as', 'Christmas', 'observation', 'in', 'Europe'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L62-L72 |
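Worked examples, straight from the branch logic above: a Saturday moves forward two days, a Sunday one day, and weekdays pass through unchanged.

from datetime import datetime

weekend_to_monday(datetime(2025, 1, 4))  # Saturday -> datetime(2025, 1, 6), Monday
weekend_to_monday(datetime(2025, 1, 5))  # Sunday   -> datetime(2025, 1, 6), Monday
weekend_to_monday(datetime(2025, 1, 6))  # Monday   -> returned unchanged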
9,887 | aouyar/PyMunin | pysysinfo/squid.py | SquidInfo._parseCounters | def _parseCounters(self, data):
"""Parse simple stats list of key, value pairs.
@param data: Multiline data with one key-value pair in each line.
@return: Dictionary of stats.
"""
info_dict = util.NestedDict()
for line in data.splitlines():
mobj = re.match('^\s*([\w\.]+)\s*=\s*(\S.*)$', line)
if mobj:
(key, value) = mobj.groups()
klist = key.split('.')
info_dict.set_nested(klist, parse_value(value))
return info_dict | python | def _parseCounters(self, data):
"""Parse simple stats list of key, value pairs.
@param data: Multiline data with one key-value pair in each line.
@return: Dictionary of stats.
"""
info_dict = util.NestedDict()
for line in data.splitlines():
mobj = re.match('^\s*([\w\.]+)\s*=\s*(\S.*)$', line)
if mobj:
(key, value) = mobj.groups()
klist = key.split('.')
info_dict.set_nested(klist, parse_value(value))
return info_dict | ['def', '_parseCounters', '(', 'self', ',', 'data', ')', ':', 'info_dict', '=', 'util', '.', 'NestedDict', '(', ')', 'for', 'line', 'in', 'data', '.', 'splitlines', '(', ')', ':', 'mobj', '=', 're', '.', 'match', '(', "'^\\s*([\\w\\.]+)\\s*=\\s*(\\S.*)$'", ',', 'line', ')', 'if', 'mobj', ':', '(', 'key', ',', 'value', ')', '=', 'mobj', '.', 'groups', '(', ')', 'klist', '=', 'key', '.', 'split', '(', "'.'", ')', 'info_dict', '.', 'set_nested', '(', 'klist', ',', 'parse_value', '(', 'value', ')', ')', 'return', 'info_dict'] | Parse simple stats list of key, value pairs.
@param data: Multiline data with one key-value pair in each line.
@return: Dictionary of stats. | ['Parse', 'simple', 'stats', 'list', 'of', 'key', 'value', 'pairs', '.'] | train | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/squid.py#L117-L131 |
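The dotted keys become nesting levels through NestedDict.set_nested; a sketch with two squidclient-style counter lines (`squid` is assumed to be an initialized SquidInfo instance, and parse_value is assumed to coerce the numeric strings):

data = "client_http.requests = 103722\nclient_http.hits = 40045"
info = squid._parseCounters(data)
# info['client_http'] == {'requests': 103722, 'hits': 40045}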
9,888 | trezor/python-trezor | trezorlib/cosi.py | sign_with_privkey | def sign_with_privkey(
digest: bytes,
privkey: Ed25519PrivateKey,
global_pubkey: Ed25519PublicPoint,
nonce: int,
global_commit: Ed25519PublicPoint,
) -> Ed25519Signature:
"""Create a CoSi signature of `digest` with the supplied private key.
This function needs to know the global public key and global commitment.
"""
h = _ed25519.H(privkey)
a = _ed25519.decodecoord(h)
S = (nonce + _ed25519.Hint(global_commit + global_pubkey + digest) * a) % _ed25519.l
return Ed25519Signature(_ed25519.encodeint(S)) | python | def sign_with_privkey(
digest: bytes,
privkey: Ed25519PrivateKey,
global_pubkey: Ed25519PublicPoint,
nonce: int,
global_commit: Ed25519PublicPoint,
) -> Ed25519Signature:
"""Create a CoSi signature of `digest` with the supplied private key.
This function needs to know the global public key and global commitment.
"""
h = _ed25519.H(privkey)
a = _ed25519.decodecoord(h)
S = (nonce + _ed25519.Hint(global_commit + global_pubkey + digest) * a) % _ed25519.l
return Ed25519Signature(_ed25519.encodeint(S)) | ['def', 'sign_with_privkey', '(', 'digest', ':', 'bytes', ',', 'privkey', ':', 'Ed25519PrivateKey', ',', 'global_pubkey', ':', 'Ed25519PublicPoint', ',', 'nonce', ':', 'int', ',', 'global_commit', ':', 'Ed25519PublicPoint', ',', ')', '->', 'Ed25519Signature', ':', 'h', '=', '_ed25519', '.', 'H', '(', 'privkey', ')', 'a', '=', '_ed25519', '.', 'decodecoord', '(', 'h', ')', 'S', '=', '(', 'nonce', '+', '_ed25519', '.', 'Hint', '(', 'global_commit', '+', 'global_pubkey', '+', 'digest', ')', '*', 'a', ')', '%', '_ed25519', '.', 'l', 'return', 'Ed25519Signature', '(', '_ed25519', '.', 'encodeint', '(', 'S', ')', ')'] | Create a CoSi signature of `digest` with the supplied private key.
This function needs to know the global public key and global commitment. | ['Create', 'a', 'CoSi', 'signature', 'of', 'digest', 'with', 'the', 'supplied', 'private', 'key', '.', 'This', 'function', 'needs', 'to', 'know', 'the', 'global', 'public', 'key', 'and', 'global', 'commitment', '.'] | train | https://github.com/trezor/python-trezor/blob/2813522b05cef4e0e545a101f8b3559a3183b45b/trezorlib/cosi.py#L104-L118 |
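In Schnorr/Ed25519 notation the function computes exactly the scalar half of a CoSi signature: with private scalar a derived from H(privkey), nonce r, global commitment R, global public key A, and message digest m,

S = \left( r + H(R \,\|\, A \,\|\, m)\, a \right) \bmod \ell

so the aggregate pair (R, S) later verifies against A just like an ordinary Ed25519 signature over m.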
9,889 | woolfson-group/isambard | isambard/optimisation/optimizer.py | rmsd_eval | def rmsd_eval(rmsd_params):
"""Builds a model and runs profit against a reference model.
Parameters
----------
rmsd_params : tuple
Tuple of (specification, sequence, parsed_ind, reference_pdb), as unpacked in the function body.
Returns
-------
rmsd: float
rmsd against reference model as calculated by profit.
"""
specification, sequence, parsed_ind, reference_pdb = rmsd_params
model = specification(*parsed_ind)
model.pack_new_sequences(sequence)
ca, bb, aa = run_profit(model.pdb, reference_pdb, path1=False, path2=False)
return bb | python | def rmsd_eval(rmsd_params):
"""Builds a model and runs profit against a reference model.
Parameters
----------
rmsd_params : tuple
Tuple of (specification, sequence, parsed_ind, reference_pdb), as unpacked in the function body.
Returns
-------
rmsd: float
rmsd against reference model as calculated by profit.
"""
specification, sequence, parsed_ind, reference_pdb = rmsd_params
model = specification(*parsed_ind)
model.pack_new_sequences(sequence)
ca, bb, aa = run_profit(model.pdb, reference_pdb, path1=False, path2=False)
return bb | ['def', 'rmsd_eval', '(', 'rmsd_params', ')', ':', 'specification', ',', 'sequence', ',', 'parsed_ind', ',', 'reference_pdb', '=', 'rmsd_params', 'model', '=', 'specification', '(', '*', 'parsed_ind', ')', 'model', '.', 'pack_new_sequences', '(', 'sequence', ')', 'ca', ',', 'bb', ',', 'aa', '=', 'run_profit', '(', 'model', '.', 'pdb', ',', 'reference_pdb', ',', 'path1', '=', 'False', ',', 'path2', '=', 'False', ')', 'return', 'bb'] | Builds a model and runs profit against a reference model.
Parameters
----------
rmsd_params : tuple
Tuple of (specification, sequence, parsed_ind, reference_pdb), as unpacked in the function body.
Returns
-------
rmsd: float
rmsd against reference model as calculated by profit. | ['Builds', 'a', 'model', 'and', 'runs', 'profit', 'against', 'a', 'reference', 'model', '.'] | train | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/optimizer.py#L70-L86 |
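Usage sketch; the specification class, its parameters, and the sequence are placeholders, but the tuple shape matches the unpacking above, and the reference model is passed as a PDB string because run_profit is called with path2=False:

# All concrete values here are placeholders.
reference = open('reference.pdb').read()        # PDB string, not a path (path2=False)
rmsd_params = (CoiledCoil,                      # an ISAMBARD specification class (assumption)
               'EIAALKQEIAALKKENAALKWEIAALKQ',  # sequence to pack
               [28, 5.0, 200.0, 283.0],         # parsed individual for the specification
               reference)
bb_rmsd = rmsd_eval(rmsd_params)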
9,890 | mesbahamin/chronophore | chronophore/controller.py | undo_sign_out | def undo_sign_out(entry, session=None):
"""Sign in a signed out entry.
:param entry: `models.Entry` object. The entry to sign back in.
:param session: (optional) SQLAlchemy session through which to access the database.
""" # noqa
if session is None:
session = Session()
else:
session = session
entry_to_sign_in = (
session
.query(Entry)
.filter(Entry.uuid == entry.uuid)
.one_or_none()
)
if entry_to_sign_in:
logger.info('Undo sign out: {}'.format(entry_to_sign_in.user_id))
logger.debug('Undo sign out: {}'.format(entry_to_sign_in))
entry_to_sign_in.time_out = None
session.add(entry_to_sign_in)
session.commit()
else:
error_message = 'Entry not found: {}'.format(entry)
logger.error(error_message)
raise ValueError(error_message) | python | def undo_sign_out(entry, session=None):
"""Sign in a signed out entry.
:param entry: `models.Entry` object. The entry to sign back in.
:param session: (optional) SQLAlchemy session through which to access the database.
""" # noqa
if session is None:
session = Session()
else:
session = session
entry_to_sign_in = (
session
.query(Entry)
.filter(Entry.uuid == entry.uuid)
.one_or_none()
)
if entry_to_sign_in:
logger.info('Undo sign out: {}'.format(entry_to_sign_in.user_id))
logger.debug('Undo sign out: {}'.format(entry_to_sign_in))
entry_to_sign_in.time_out = None
session.add(entry_to_sign_in)
session.commit()
else:
error_message = 'Entry not found: {}'.format(entry)
logger.error(error_message)
raise ValueError(error_message) | ['def', 'undo_sign_out', '(', 'entry', ',', 'session', '=', 'None', ')', ':', '# noqa', 'if', 'session', 'is', 'None', ':', 'session', '=', 'Session', '(', ')', 'else', ':', 'session', '=', 'session', 'entry_to_sign_in', '=', '(', 'session', '.', 'query', '(', 'Entry', ')', '.', 'filter', '(', 'Entry', '.', 'uuid', '==', 'entry', '.', 'uuid', ')', '.', 'one_or_none', '(', ')', ')', 'if', 'entry_to_sign_in', ':', 'logger', '.', 'info', '(', "'Undo sign out: {}'", '.', 'format', '(', 'entry_to_sign_in', '.', 'user_id', ')', ')', 'logger', '.', 'debug', '(', "'Undo sign out: {}'", '.', 'format', '(', 'entry_to_sign_in', ')', ')', 'entry_to_sign_in', '.', 'time_out', '=', 'None', 'session', '.', 'add', '(', 'entry_to_sign_in', ')', 'session', '.', 'commit', '(', ')', 'else', ':', 'error_message', '=', "'Entry not found: {}'", '.', 'format', '(', 'entry', ')', 'logger', '.', 'error', '(', 'error_message', ')', 'raise', 'ValueError', '(', 'error_message', ')'] | Sign in a signed out entry.
:param entry: `models.Entry` object. The entry to sign back in.
:param session: (optional) SQLAlchemy session through which to access the database. | ['Sign', 'in', 'a', 'signed', 'out', 'entry', '.'] | train | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L231-L258 |
9,891 | JasonKessler/scattertext | scattertext/TermDocMatrixFactory.py | TermDocMatrixFactory.censor_entity_types | def censor_entity_types(self, entity_types):
# type: (set) -> TermDocMatrixFactory
'''
Entity types to exclude from feature construction. Terms matching
specified entities, instead of being labeled by their lower case orthographic
form or lemma, will be labeled by their entity type.
Parameters
----------
entity_types : set of entity types outputted by spaCy
'TIME', 'WORK_OF_ART', 'PERSON', 'MONEY', 'ORG', 'ORDINAL', 'DATE',
'CARDINAL', 'LAW', 'QUANTITY', 'GPE', 'PERCENT'
Returns
---------
self
'''
assert type(entity_types) == set
self._entity_types_to_censor = entity_types
self._feats_from_spacy_doc = FeatsFromSpacyDoc(
use_lemmas=self._use_lemmas,
entity_types_to_censor=self._entity_types_to_censor
)
return self | python | def censor_entity_types(self, entity_types):
# type: (set) -> TermDocMatrixFactory
'''
Entity types to exclude from feature construction. Terms matching
specified entities, instead of being labeled by their lower case orthographic
form or lemma, will be labeled by their entity type.
Parameters
----------
entity_types : set of entity types outputted by spaCy
'TIME', 'WORK_OF_ART', 'PERSON', 'MONEY', 'ORG', 'ORDINAL', 'DATE',
'CARDINAL', 'LAW', 'QUANTITY', 'GPE', 'PERCENT'
Returns
---------
self
'''
assert type(entity_types) == set
self._entity_types_to_censor = entity_types
self._feats_from_spacy_doc = FeatsFromSpacyDoc(
use_lemmas=self._use_lemmas,
entity_types_to_censor=self._entity_types_to_censor
)
return self | ['def', 'censor_entity_types', '(', 'self', ',', 'entity_types', ')', ':', '# type: (set) -> TermDocMatrixFactory', 'assert', 'type', '(', 'entity_types', ')', '==', 'set', 'self', '.', '_entity_types_to_censor', '=', 'entity_types', 'self', '.', '_feats_from_spacy_doc', '=', 'FeatsFromSpacyDoc', '(', 'use_lemmas', '=', 'self', '.', '_use_lemmas', ',', 'entity_types_to_censor', '=', 'self', '.', '_entity_types_to_censor', ')', 'return', 'self'] | Entity types to exclude from feature construction. Terms matching
specified entities, instead of being labeled by their lower case orthographic
form or lemma, will be labeled by their entity type.
Parameters
----------
entity_types : set of entity types outputted by spaCy
'TIME', 'WORK_OF_ART', 'PERSON', 'MONEY', 'ORG', 'ORDINAL', 'DATE',
'CARDINAL', 'LAW', 'QUANTITY', 'GPE', 'PERCENT'
Returns
---------
self | ['Entity', 'types', 'to', 'exclude', 'from', 'feature', 'construction', '.', 'Terms', 'matching', 'specificed', 'entities', 'instead', 'of', 'labeled', 'by', 'their', 'lower', 'case', 'orthographic', 'form', 'or', 'lemma', 'will', 'be', 'labeled', 'by', 'their', 'entity', 'type', '.'] | train | https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrixFactory.py#L149-L172 |
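For instance, to collapse person and organization names into their entity types during feature extraction (the argument must be a set, per the assert; since the method returns self it can be chained):

factory = TermDocMatrixFactory(category_text_iter)  # constructor arguments are an assumption
factory.censor_entity_types({'PERSON', 'ORG'})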
9,892 | linnarsson-lab/loompy | loompy/loompy.py | create_append | def create_append(filename: str, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], row_attrs: Dict[str, np.ndarray], col_attrs: Dict[str, np.ndarray], *, file_attrs: Dict[str, str] = None, fill_values: Dict[str, np.ndarray] = None) -> None:
"""
**DEPRECATED** - Use `new` instead; see https://github.com/linnarsson-lab/loompy/issues/42
"""
deprecated("'create_append' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42")
if os.path.exists(filename):
with connect(filename) as ds:
ds.add_columns(layers, col_attrs, fill_values=fill_values)
else:
create(filename, layers, row_attrs, col_attrs, file_attrs=file_attrs) | python | def create_append(filename: str, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], row_attrs: Dict[str, np.ndarray], col_attrs: Dict[str, np.ndarray], *, file_attrs: Dict[str, str] = None, fill_values: Dict[str, np.ndarray] = None) -> None:
"""
**DEPRECATED** - Use `new` instead; see https://github.com/linnarsson-lab/loompy/issues/42
"""
deprecated("'create_append' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42")
if os.path.exists(filename):
with connect(filename) as ds:
ds.add_columns(layers, col_attrs, fill_values=fill_values)
else:
create(filename, layers, row_attrs, col_attrs, file_attrs=file_attrs) | ['def', 'create_append', '(', 'filename', ':', 'str', ',', 'layers', ':', 'Union', '[', 'np', '.', 'ndarray', ',', 'Dict', '[', 'str', ',', 'np', '.', 'ndarray', ']', ',', 'loompy', '.', 'LayerManager', ']', ',', 'row_attrs', ':', 'Dict', '[', 'str', ',', 'np', '.', 'ndarray', ']', ',', 'col_attrs', ':', 'Dict', '[', 'str', ',', 'np', '.', 'ndarray', ']', ',', '*', ',', 'file_attrs', ':', 'Dict', '[', 'str', ',', 'str', ']', '=', 'None', ',', 'fill_values', ':', 'Dict', '[', 'str', ',', 'np', '.', 'ndarray', ']', '=', 'None', ')', '->', 'None', ':', 'deprecated', '(', '"\'create_append\' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42"', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', ':', 'with', 'connect', '(', 'filename', ')', 'as', 'ds', ':', 'ds', '.', 'add_columns', '(', 'layers', ',', 'col_attrs', ',', 'fill_values', '=', 'fill_values', ')', 'else', ':', 'create', '(', 'filename', ',', 'layers', ',', 'row_attrs', ',', 'col_attrs', ',', 'file_attrs', '=', 'file_attrs', ')'] | **DEPRECATED** - Use `new` instead; see https://github.com/linnarsson-lab/loompy/issues/42 | ['**', 'DEPRECATED', '**', '-', 'Use', 'new', 'instead', ';', 'see', 'https', ':', '//', 'github', '.', 'com', '/', 'linnarsson', '-', 'lab', '/', 'loompy', '/', 'issues', '/', '42'] | train | https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L981-L990 |
9,893 | ForensicArtifacts/artifacts | artifacts/artifact.py | ArtifactDefinition.AppendSource | def AppendSource(self, type_indicator, attributes):
"""Appends a source.
If you want to implement your own source type you should create a subclass
in source_type.py and change the AppendSource method to handle the new
subclass. This function raises FormatError if an unsupported source type
indicator is encountered.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
"""
if not type_indicator:
raise errors.FormatError('Missing type indicator.')
try:
source_object = registry.ArtifactDefinitionsRegistry.CreateSourceType(
type_indicator, attributes)
except (AttributeError, TypeError) as exception:
raise errors.FormatError((
'Unable to create source type: {0:s} for artifact definition: {1:s} '
'with error: {2!s}').format(type_indicator, self.name, exception))
self.sources.append(source_object)
return source_object | python | def AppendSource(self, type_indicator, attributes):
"""Appends a source.
If you want to implement your own source type you should create a subclass
in source_type.py and change the AppendSource method to handle the new
subclass. This function raises FormatError if an unsupported source type
indicator is encountered.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
"""
if not type_indicator:
raise errors.FormatError('Missing type indicator.')
try:
source_object = registry.ArtifactDefinitionsRegistry.CreateSourceType(
type_indicator, attributes)
except (AttributeError, TypeError) as exception:
raise errors.FormatError((
'Unable to create source type: {0:s} for artifact definition: {1:s} '
'with error: {2!s}').format(type_indicator, self.name, exception))
self.sources.append(source_object)
return source_object | ['def', 'AppendSource', '(', 'self', ',', 'type_indicator', ',', 'attributes', ')', ':', 'if', 'not', 'type_indicator', ':', 'raise', 'errors', '.', 'FormatError', '(', "'Missing type indicator.'", ')', 'try', ':', 'source_object', '=', 'registry', '.', 'ArtifactDefinitionsRegistry', '.', 'CreateSourceType', '(', 'type_indicator', ',', 'attributes', ')', 'except', '(', 'AttributeError', ',', 'TypeError', ')', 'as', 'exception', ':', 'raise', 'errors', '.', 'FormatError', '(', '(', "'Unable to create source type: {0:s} for artifact definition: {1:s} '", "'with error: {2!s}'", ')', '.', 'format', '(', 'type_indicator', ',', 'self', '.', 'name', ',', 'exception', ')', ')', 'self', '.', 'sources', '.', 'append', '(', 'source_object', ')', 'return', 'source_object'] | Appends a source.
If you want to implement your own source type you should create a subclass
in source_type.py and change the AppendSource method to handle the new
subclass. This function raises FormatError if an unsupported source type
indicator is encountered.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing. | ['Appends', 'a', 'source', '.'] | train | https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/artifacts/artifact.py#L42-L73 |
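A sketch of attaching a file source to a definition; the FILE type indicator with a paths attribute follows the public artifacts definition format, while the ArtifactDefinition constructor arguments are an assumption:

artifact = ArtifactDefinition('PasswdFile')  # constructor signature is an assumption
source = artifact.AppendSource('FILE', {'paths': ['/etc/passwd']})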
9,894 | Yubico/python-pyhsm | pyhsm/base.py | YHSM.drain | def drain(self):
"""
Read until there is nothing more to be read. Only intended for test code/debugging!
@returns: True on success
@rtype: bool
"""
try:
unlock = self.stick.acquire()
return self.stick.drain()
finally:
unlock() | python | def drain(self):
"""
Read until there is nothing more to be read. Only intended for test code/debugging!
@returns: True on success
@rtype: bool
"""
try:
unlock = self.stick.acquire()
return self.stick.drain()
finally:
unlock() | ['def', 'drain', '(', 'self', ')', ':', 'try', ':', 'unlock', '=', 'self', '.', 'stick', '.', 'acquire', '(', ')', 'return', 'self', '.', 'stick', '.', 'drain', '(', ')', 'finally', ':', 'unlock', '(', ')'] | Read until there is nothing more to be read. Only intended for test code/debugging!
@returns: True on success
@rtype: bool | ['Read', 'until', 'there', 'is', 'nothing', 'more', 'to', 'be', 'read', '.', 'Only', 'intended', 'for', 'test', 'code', '/', 'debugging!'] | train | https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/base.py#L489-L500 |
9,895 | basho/riak-python-client | riak/client/operations.py | RiakClientOperations.set_bucket_props | def set_bucket_props(self, transport, bucket, props):
"""
set_bucket_props(bucket, props)
Sets bucket properties for the given bucket.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket whose properties will be set
:type bucket: RiakBucket
:param props: the properties to set
:type props: dict
"""
_validate_bucket_props(props)
return transport.set_bucket_props(bucket, props) | python | def set_bucket_props(self, transport, bucket, props):
"""
set_bucket_props(bucket, props)
Sets bucket properties for the given bucket.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket whose properties will be set
:type bucket: RiakBucket
:param props: the properties to set
:type props: dict
"""
_validate_bucket_props(props)
return transport.set_bucket_props(bucket, props) | ['def', 'set_bucket_props', '(', 'self', ',', 'transport', ',', 'bucket', ',', 'props', ')', ':', '_validate_bucket_props', '(', 'props', ')', 'return', 'transport', '.', 'set_bucket_props', '(', 'bucket', ',', 'props', ')'] | set_bucket_props(bucket, props)
Sets bucket properties for the given bucket.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket whose properties will be set
:type bucket: RiakBucket
:param props: the properties to set
:type props: dict | ['set_bucket_props', '(', 'bucket', 'props', ')'] | train | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L392-L407 |
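For example, tightening replication and disabling sibling creation on one bucket; n_val and allow_mult are standard Riak bucket properties, and _validate_bucket_props rejects anything malformed:

bucket = client.bucket('user_profiles')  # `client` is an assumed RiakClient instance
client.set_bucket_props(bucket, {'n_val': 3, 'allow_mult': False})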
9,896 | nugget/python-insteonplm | insteonplm/plm.py | IM.poll_devices | def poll_devices(self):
"""Request status updates from each device."""
for addr in self.devices:
device = self.devices[addr]
if not device.address.is_x10:
device.async_refresh_state() | python | def poll_devices(self):
"""Request status updates from each device."""
for addr in self.devices:
device = self.devices[addr]
if not device.address.is_x10:
device.async_refresh_state() | ['def', 'poll_devices', '(', 'self', ')', ':', 'for', 'addr', 'in', 'self', '.', 'devices', ':', 'device', '=', 'self', '.', 'devices', '[', 'addr', ']', 'if', 'not', 'device', '.', 'address', '.', 'is_x10', ':', 'device', '.', 'async_refresh_state', '(', ')'] | Request status updates from each device. | ['Request', 'status', 'updates', 'from', 'each', 'device', '.'] | train | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/plm.py#L171-L176 |
9,897 | openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_value_as_list | def get_value_as_list(self, dictionary, key):
"""Helper function to check and convert a value to list.
Helper function to check and convert a value to json list.
This helps the ribcl data to be generalized across the servers.
:param dictionary: a dictionary to check in if key is present.
:param key: key to be checked if that's present in the given dictionary.
:returns: the data converted to a list.
"""
if key not in dictionary:
return None
value = dictionary[key]
if not isinstance(value, list):
return [value]
else:
return value | python | def get_value_as_list(self, dictionary, key):
"""Helper function to check and convert a value to list.
Helper function to check and convert a value to json list.
This helps the ribcl data to be generalized across the servers.
:param dictionary: a dictionary to check in if key is present.
:param key: key to be checked if that's present in the given dictionary.
:returns: the data converted to a list.
"""
if key not in dictionary:
return None
value = dictionary[key]
if not isinstance(value, list):
return [value]
else:
return value | ['def', 'get_value_as_list', '(', 'self', ',', 'dictionary', ',', 'key', ')', ':', 'if', 'key', 'not', 'in', 'dictionary', ':', 'return', 'None', 'value', '=', 'dictionary', '[', 'key', ']', 'if', 'not', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'return', '[', 'value', ']', 'else', ':', 'return', 'value'] | Helper function to check and convert a value to list.
Helper function to check and convert a value to json list.
This helps the ribcl data to be generalized across the servers.
:param dictionary: a dictionary to check in if key is present.
:param key: key to be checked if that's present in the given dictionary.
:returns: the data converted to a list. | ['Helper', 'function', 'to', 'check', 'and', 'convert', 'a', 'value', 'to', 'list', '.'] | train | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L940-L957 |
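The three possible outcomes mirror the branches above (`ribcl` stands in for a RIBCLOperations instance):

ribcl.get_value_as_list({'MEMORY': [{'SIZE': '4 GB'}]}, 'MEMORY')  # -> [{'SIZE': '4 GB'}]
ribcl.get_value_as_list({'MEMORY': {'SIZE': '4 GB'}}, 'MEMORY')    # -> [{'SIZE': '4 GB'}]
ribcl.get_value_as_list({}, 'MEMORY')                              # -> None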
9,898 | Ouranosinc/xclim | docs/conf.py | _get_indicators | def _get_indicators(modules):
"""For all modules or classes listed, return the children that are instances of xclim.utils.Indicator.
modules : sequence
Sequence of modules to inspect.
"""
out = []
for obj in modules:
for key, val in obj.__dict__.items():
if isinstance(val, xcu.Indicator):
out.append(val)
return out | python | def _get_indicators(modules):
"""For all modules or classes listed, return the children that are instances of xclim.utils.Indicator.
modules : sequence
Sequence of modules to inspect.
"""
out = []
for obj in modules:
for key, val in obj.__dict__.items():
if isinstance(val, xcu.Indicator):
out.append(val)
return out | ['def', '_get_indicators', '(', 'modules', ')', ':', 'out', '=', '[', ']', 'for', 'obj', 'in', 'modules', ':', 'for', 'key', ',', 'val', 'in', 'obj', '.', '__dict__', '.', 'items', '(', ')', ':', 'if', 'isinstance', '(', 'val', ',', 'xcu', '.', 'Indicator', ')', ':', 'out', '.', 'append', '(', 'val', ')', 'return', 'out'] | For all modules or classes listed, return the children that are instances of xclim.utils.Indicator.
modules : sequence
Sequence of modules to inspect. | ['For', 'all', 'modules', 'or', 'classes', 'listed', 'return', 'the', 'children', 'that', 'are', 'instances', 'of', 'xclim', '.', 'utils', '.', 'Indicator', '.'] | train | https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/docs/conf.py#L32-L44 |
9,899 | geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | crt_grid.Wm | def Wm(self):
"""Return the smoothing regularization matrix Wm of the grid
"""
centroids = self.get_element_centroids()
Wm = scipy.sparse.csr_matrix(
(self.nr_of_elements, self.nr_of_elements))
# Wm = np.zeros((self.nr_of_elements, self.nr_of_elements))
for i, nb in enumerate(self.element_neighbors):
for j, edges in zip(nb, self.element_neighbors_edges[i]):
# side length
edge_coords = self.nodes['presort'][edges][:, 1:]
edge_length = np.linalg.norm(
edge_coords[1, :] - edge_coords[0, :]
)
distance = np.linalg.norm(centroids[i] - centroids[j])
# main diagonal
Wm[i, i] += edge_length / distance
# side diagonals
Wm[i, j] -= edge_length / distance
return Wm | python | def Wm(self):
"""Return the smoothing regularization matrix Wm of the grid
"""
centroids = self.get_element_centroids()
Wm = scipy.sparse.csr_matrix(
(self.nr_of_elements, self.nr_of_elements))
# Wm = np.zeros((self.nr_of_elements, self.nr_of_elements))
for i, nb in enumerate(self.element_neighbors):
for j, edges in zip(nb, self.element_neighbors_edges[i]):
# side length
edge_coords = self.nodes['presort'][edges][:, 1:]
edge_length = np.linalg.norm(
edge_coords[1, :] - edge_coords[0, :]
)
distance = np.linalg.norm(centroids[i] - centroids[j])
# main diagonal
Wm[i, i] += edge_length / distance
# side diagonals
Wm[i, j] -= edge_length / distance
return Wm | ['def', 'Wm', '(', 'self', ')', ':', 'centroids', '=', 'self', '.', 'get_element_centroids', '(', ')', 'Wm', '=', 'scipy', '.', 'sparse', '.', 'csr_matrix', '(', '(', 'self', '.', 'nr_of_elements', ',', 'self', '.', 'nr_of_elements', ')', ')', '# Wm = np.zeros((self.nr_of_elements, self.nr_of_elements))', 'for', 'i', ',', 'nb', 'in', 'enumerate', '(', 'self', '.', 'element_neighbors', ')', ':', 'for', 'j', ',', 'edges', 'in', 'zip', '(', 'nb', ',', 'self', '.', 'element_neighbors_edges', '[', 'i', ']', ')', ':', '# side length', 'edge_coords', '=', 'self', '.', 'nodes', '[', "'presort'", ']', '[', 'edges', ']', '[', ':', ',', '1', ':', ']', 'edge_length', '=', 'np', '.', 'linalg', '.', 'norm', '(', 'edge_coords', '[', '1', ',', ':', ']', '-', 'edge_coords', '[', '0', ',', ':', ']', ')', 'distance', '=', 'np', '.', 'linalg', '.', 'norm', '(', 'centroids', '[', 'i', ']', '-', 'centroids', '[', 'j', ']', ')', '# main diagonal', 'Wm', '[', 'i', ',', 'i', ']', '+=', 'edge_length', '/', 'distance', '# side diagonals', 'Wm', '[', 'i', ',', 'j', ']', '-=', 'edge_length', '/', 'distance', 'return', 'Wm'] | Return the smoothing regularization matrix Wm of the grid | ['Return', 'the', 'smoothing', 'regularization', 'matrix', 'Wm', 'of', 'the', 'grid'] | train | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L672-L694 |
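The loop assembles the standard smoothness operator of a finite-volume grid: for neighboring cells i and j sharing an edge of length \ell_{ij}, with centroid distance d_{ij},

W_{ii} = \sum_{j \in N(i)} \frac{\ell_{ij}}{d_{ij}}, \qquad W_{ij} = -\frac{\ell_{ij}}{d_{ij}},

so every row of W_m sums to zero and the matrix acts as a weighted graph Laplacian over the element mesh.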