Dataset schema (one record per Python function; per-column types and size ranges as reported by the dataset viewer):

| column | type | values |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string (1 class) | "python" |
| func_code_string | string | lengths 100 to 30.3k |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string (1 class) | "train" |
| func_code_url | string | lengths 91 to 315 |

Each record below lists its fields in this column order, separated by `|`:
8,600 | googleads/googleads-python-lib | examples/adwords/v201809/campaign_management/add_keywords_using_incremental_batch_job.py | GetBatchJob | def GetBatchJob(client, batch_job_id):
"""Retrieves the BatchJob with the given id.
Args:
client: an instantiated AdWordsClient used to retrieve the BatchJob.
batch_job_id: a long identifying the BatchJob to be retrieved.
Returns:
The BatchJob associated with the given id.
"""
batch_job_service = client.GetService('BatchJobService', 'v201809')
selector = {
'fields': ['Id', 'Status', 'DownloadUrl'],
'predicates': [
{
'field': 'Id',
'operator': 'EQUALS',
'values': [batch_job_id]
}
]
}
return batch_job_service.get(selector)['entries'][0] | python | def GetBatchJob(client, batch_job_id):
"""Retrieves the BatchJob with the given id.
Args:
client: an instantiated AdWordsClient used to retrieve the BatchJob.
batch_job_id: a long identifying the BatchJob to be retrieved.
Returns:
The BatchJob associated with the given id.
"""
batch_job_service = client.GetService('BatchJobService', 'v201809')
selector = {
'fields': ['Id', 'Status', 'DownloadUrl'],
'predicates': [
{
'field': 'Id',
'operator': 'EQUALS',
'values': [batch_job_id]
}
]
}
return batch_job_service.get(selector)['entries'][0] | ['def', 'GetBatchJob', '(', 'client', ',', 'batch_job_id', ')', ':', 'batch_job_service', '=', 'client', '.', 'GetService', '(', "'BatchJobService'", ',', "'v201809'", ')', 'selector', '=', '{', "'fields'", ':', '[', "'Id'", ',', "'Status'", ',', "'DownloadUrl'", ']', ',', "'predicates'", ':', '[', '{', "'field'", ':', "'Id'", ',', "'operator'", ':', "'EQUALS'", ',', "'values'", ':', '[', 'batch_job_id', ']', '}', ']', '}', 'return', 'batch_job_service', '.', 'get', '(', 'selector', ')', '[', "'entries'", ']', '[', '0', ']'] | Retrieves the BatchJob with the given id.
Args:
client: an instantiated AdWordsClient used to retrieve the BatchJob.
batch_job_id: a long identifying the BatchJob to be retrieved.
Returns:
The BatchJob associated with the given id. | ['Retrieves', 'the', 'BatchJob', 'with', 'the', 'given', 'id', '.'] | train | https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/campaign_management/add_keywords_using_incremental_batch_job.py#L163-L185 |
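A minimal usage sketch for the `GetBatchJob` helper above. The client bootstrap via `LoadFromStorage` assumes a configured googleads.yaml credentials file, and the job id is a placeholder.

```python
from googleads import adwords

# Load an AdWordsClient from a googleads.yaml credentials file (assumed to exist).
client = adwords.AdWordsClient.LoadFromStorage()

# Fetch one batch job by id (placeholder id) and check on its progress.
batch_job = GetBatchJob(client, batch_job_id=123456789)
print(batch_job.status)  # e.g. ACTIVE, DONE, CANCELED; downloadUrl holds the results location
```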
8,601 | PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.on_success | def on_success(self, retval, task_id, args, kwargs):
"""
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
"""
if self.request.is_eager:
# Store the result because celery wouldn't otherwise
self.update_state(task_id, SUCCESS, retval) | python | def on_success(self, retval, task_id, args, kwargs):
"""
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
"""
if self.request.is_eager:
# Store the result because celery wouldn't otherwise
self.update_state(task_id, SUCCESS, retval) | ['def', 'on_success', '(', 'self', ',', 'retval', ',', 'task_id', ',', 'args', ',', 'kwargs', ')', ':', 'if', 'self', '.', 'request', '.', 'is_eager', ':', "# Store the result because celery wouldn't otherwise", 'self', '.', 'update_state', '(', 'task_id', ',', 'SUCCESS', ',', 'retval', ')'] | Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results. | ['Store', 'results', 'in', 'the', 'backend', 'even', 'if', 'we', 're', 'always', 'eager', '.', 'This', 'ensures', 'the', 'delay_or_run', 'calls', 'always', 'at', 'least', 'have', 'results', '.'] | train | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L392-L399 |
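For context, `on_success` runs on any `JobtasticTask` subclass; below is a minimal sketch of such a subclass. The class name, kwargs, and timeout value are illustrative, not part of the record above.

```python
from jobtastic import JobtasticTask

class ExportReportTask(JobtasticTask):
    # Kwargs that identify a unique invocation, paired with their serializers.
    significant_kwargs = [('report_id', str)]
    # Seconds to lock out duplicate tasks while one is already running.
    herd_avoidance_timeout = 60

    def calculate_result(self, report_id, **kwargs):
        # The real work happens here; on_success then stores this result,
        # even when the task ran eagerly.
        return {'report_id': report_id, 'status': 'done'}
```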
8,602 | bslatkin/dpxdt | dpxdt/server/work_queue_handlers.py | handle_add | def handle_add(queue_name):
"""Adds a task to a queue."""
source = request.form.get('source', request.remote_addr, type=str)
try:
task_id = work_queue.add(
queue_name,
payload=request.form.get('payload', type=str),
content_type=request.form.get('content_type', type=str),
source=source,
task_id=request.form.get('task_id', type=str))
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.info('Task added: queue=%r, task_id=%r, source=%r',
queue_name, task_id, source)
return flask.jsonify(task_id=task_id) | python | def handle_add(queue_name):
"""Adds a task to a queue."""
source = request.form.get('source', request.remote_addr, type=str)
try:
task_id = work_queue.add(
queue_name,
payload=request.form.get('payload', type=str),
content_type=request.form.get('content_type', type=str),
source=source,
task_id=request.form.get('task_id', type=str))
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.info('Task added: queue=%r, task_id=%r, source=%r',
queue_name, task_id, source)
return flask.jsonify(task_id=task_id) | ['def', 'handle_add', '(', 'queue_name', ')', ':', 'source', '=', 'request', '.', 'form', '.', 'get', '(', "'source'", ',', 'request', '.', 'remote_addr', ',', 'type', '=', 'str', ')', 'try', ':', 'task_id', '=', 'work_queue', '.', 'add', '(', 'queue_name', ',', 'payload', '=', 'request', '.', 'form', '.', 'get', '(', "'payload'", ',', 'type', '=', 'str', ')', ',', 'content_type', '=', 'request', '.', 'form', '.', 'get', '(', "'content_type'", ',', 'type', '=', 'str', ')', ',', 'source', '=', 'source', ',', 'task_id', '=', 'request', '.', 'form', '.', 'get', '(', "'task_id'", ',', 'type', '=', 'str', ')', ')', 'except', 'work_queue', '.', 'Error', ',', 'e', ':', 'return', 'utils', '.', 'jsonify_error', '(', 'e', ')', 'db', '.', 'session', '.', 'commit', '(', ')', 'logging', '.', 'info', '(', "'Task added: queue=%r, task_id=%r, source=%r'", ',', 'queue_name', ',', 'task_id', ',', 'source', ')', 'return', 'flask', '.', 'jsonify', '(', 'task_id', '=', 'task_id', ')'] | Adds a task to a queue. | ['Adds', 'a', 'task', 'to', 'a', 'queue', '.'] | train | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L37-L53 |
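A sketch of calling this handler from a client or test script. Only the handler body is shown above, so the host and route are hypothetical; the form fields match what `handle_add` reads from `request.form`.

```python
import requests

resp = requests.post(
    'http://localhost:5000/api/work_queue/capture/add',  # hypothetical URL for this handler
    data={
        'payload': '{"build_id": 1, "release_name": "v2"}',
        'content_type': 'application/json',
        'source': 'example-client',
    },
)
print(resp.json()['task_id'])  # the handler returns the new task id as JSON
```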
8,603 | google/tangent | tangent/grad_util.py | grad | def grad(func,
wrt=(0,),
optimized=True,
preserve_result=False,
check_dims=True,
verbose=0):
"""Return the gradient of a function `func`.
Args:
func: The function to take the gradient of.
wrt: A tuple of argument indices to differentiate with respect to. By
default the derivative is taken with respect to the first argument.
optimized: Whether to optimize the gradient function (`True` by default).
preserve_result: A boolean indicating whether or not the generated gradient
function should also return the output of the original function.
If False, the return signature of the input and output functions will be
> val = func(*args)
> df = grad(func,preserve_result=False)
> gradval = df(*args)
If True,
> val = func(*args)
> df = grad(func,preserve_result=True)
> gradval, val = df(*args)
Note that if taking gradients with respect to multiple arguments,
the primal value will be appended to the return signature. Ex:
> val = func(x,y)
> df = grad(func,wrt=(0,1),preserve_result=True)
> dx,dy,val = df(x,y)
check_dims: A boolean (`True` by default) indicating whether to check
that the result of the original function `func` is a scalar, raising
an error if it is not.
Gradients are only valid for scalar-valued outputs, so we check
this by default.
verbose: If 1 the source code of the generated functions will be
output to stdout at various stages of the process for debugging
purposes. If > 1, all intermediate code generation steps will print.
Returns:
df: A function that calculates the gradient with respect to arguments
specified in `wrt`, using forward or reverse mode according to `mode`.
If using reverse mode, the gradient is calculated in either split
or joint motion according to the value passed in `motion`. If
`preserve_result` is True, the function will also return the original
result of `func`.
"""
return autodiff(
func,
wrt=wrt,
motion='joint',
mode='reverse',
optimized=optimized,
preserve_result=preserve_result,
check_dims=check_dims,
input_derivative=INPUT_DERIVATIVE.DefaultOne,
verbose=verbose) | python | def grad(func,
wrt=(0,),
optimized=True,
preserve_result=False,
check_dims=True,
verbose=0):
"""Return the gradient of a function `func`.
Args:
func: The function to take the gradient of.
wrt: A tuple of argument indices to differentiate with respect to. By
default the derivative is taken with respect to the first argument.
optimized: Whether to optimize the gradient function (`True` by default).
preserve_result: A boolean indicating whether or not the generated gradient
function should also return the output of the original function.
If False, the return signature of the input and output functions will be
> val = func(*args)
> df = grad(func,preserve_result=False)
> gradval = df(*args)
If True,
> val = func(*args)
> df = grad(func,preserve_result=True)
> gradval, val = df(*args)
Note that if taking gradients with respect to multiple arguments,
the primal value will be appended to the return signature. Ex:
> val = func(x,y)
> df = grad(func,wrt=(0,1),preserve_result=True)
> dx,dy,val = df(x,y)
check_dims: A boolean (`True` by default) indicating whether to check
that the result of the original function `func` is a scalar, raising
an error if it is not.
Gradients are only valid for scalar-valued outputs, so we check
this by default.
verbose: If 1 the source code of the generated functions will be
output to stdout at various stages of the process for debugging
purposes. If > 1, all intermediate code generation steps will print.
Returns:
df: A function that calculates the gradient with respect to arguments
specified in `wrt`, using forward or reverse mode according to `mode`.
If using reverse mode, the gradient is calculated in either split
or joint motion according to the value passed in `motion`. If
`preserve_result` is True, the function will also return the original
result of `func`.
"""
return autodiff(
func,
wrt=wrt,
motion='joint',
mode='reverse',
optimized=optimized,
preserve_result=preserve_result,
check_dims=check_dims,
input_derivative=INPUT_DERIVATIVE.DefaultOne,
verbose=verbose) | ['def', 'grad', '(', 'func', ',', 'wrt', '=', '(', '0', ',', ')', ',', 'optimized', '=', 'True', ',', 'preserve_result', '=', 'False', ',', 'check_dims', '=', 'True', ',', 'verbose', '=', '0', ')', ':', 'return', 'autodiff', '(', 'func', ',', 'wrt', '=', 'wrt', ',', 'motion', '=', "'joint'", ',', 'mode', '=', "'reverse'", ',', 'optimized', '=', 'optimized', ',', 'preserve_result', '=', 'preserve_result', ',', 'check_dims', '=', 'check_dims', ',', 'input_derivative', '=', 'INPUT_DERIVATIVE', '.', 'DefaultOne', ',', 'verbose', '=', 'verbose', ')'] | Return the gradient of a function `func`.
Args:
func: The function to take the gradient of.
wrt: A tuple of argument indices to differentiate with respect to. By
default the derivative is taken with respect to the first argument.
optimized: Whether to optimize the gradient function (`True` by default).
preserve_result: A boolean indicating whether or not the generated gradient
function should also return the output of the original function.
If False, the return signature of the input and output functions will be
> val = func(*args)
> df = grad(func,preserve_result=False)
> gradval = df(*args)
If True,
> val = func(*args)
> df = grad(func,preserve_result=True)
> gradval, val = df(*args)
Note that if taking gradients with respect to multiple arguments,
the primal value will be appended to the return signature. Ex:
> val = func(x,y)
> df = grad(func,wrt=(0,1),preserve_result=True)
> dx,dy,val = df(x,y)
check_dims: A boolean (`True` by default) indicating whether to check
that the result of the original function `func` is a scalar, raising
an error if it is not.
Gradients are only valid for scalar-valued outputs, so we check
this by default.
verbose: If 1 the source code of the generated functions will be
output to stdout at various stages of the process for debugging
purposes. If > 1, all intermediate code generation steps will print.
Returns:
df: A function that calculates the gradient with respect to arguments
specified in `wrt`, using forward or reverse mode according to `mode`.
If using reverse mode, the gradient is calculated in either split
or joint motion according to the value passed in `motion`. If
`preserve_result` is True, the function will also return the original
result of `func`. | ['Return', 'the', 'gradient', 'of', 'a', 'function', 'func', '.', 'Args', ':', 'func', ':', 'The', 'function', 'to', 'take', 'the', 'gradient', 'of', '.', 'wrt', ':', 'A', 'tuple', 'of', 'argument', 'indices', 'to', 'differentiate', 'with', 'respect', 'to', '.', 'By', 'default', 'the', 'derivative', 'is', 'taken', 'with', 'respect', 'to', 'the', 'first', 'argument', '.', 'optimized', ':', 'Whether', 'to', 'optimize', 'the', 'gradient', 'function', '(', 'True', 'by', 'default', ')', '.', 'preserve_result', ':', 'A', 'boolean', 'indicating', 'whether', 'or', 'not', 'the', 'generated', 'gradient', 'function', 'should', 'also', 'return', 'the', 'output', 'of', 'the', 'original', 'function', '.', 'If', 'False', 'the', 'return', 'signature', 'of', 'the', 'input', 'and', 'output', 'functions', 'will', 'be', '>', 'val', '=', 'func', '(', '*', 'args', ')', '>', 'df', '=', 'grad', '(', 'func', 'preserve_result', '=', 'False', ')', '>', 'gradval', '=', 'df', '(', '*', 'args', ')', 'If', 'True', '>', 'val', '=', 'func', '(', '*', 'args', ')', '>', 'df', '=', 'grad', '(', 'func', 'preserve_result', '=', 'True', ')', '>', 'gradval', 'val', '=', 'df', '(', '*', 'args', ')', 'Note', 'that', 'if', 'taking', 'gradients', 'with', 'respect', 'to', 'multiple', 'arguments', 'the', 'primal', 'value', 'will', 'be', 'appended', 'to', 'the', 'return', 'signature', '.', 'Ex', ':', '>', 'val', '=', 'func', '(', 'x', 'y', ')', '>', 'df', '=', 'grad', '(', 'func', 'wrt', '=', '(', '0', '1', ')', 'preserve_result', '=', 'True', ')', '>', 'dx', 'dy', 'val', '=', 'df', '(', 'x', 'y', ')', 'check_dims', ':', 'A', 'boolean', '(', 'True', 'by', 'default', ')', 'indicating', 'whether', 'to', 'check', 'that', 'the', 'result', 'of', 'the', 'original', 'function', 'func', 'is', 'a', 'scalar', 'raising', 'an', 'error', 'if', 'it', 'is', 'not', '.', 'Gradients', 'are', 'only', 'valid', 'for', 'scalar', '-', 'valued', 'outputs', 'so', 'we', 'check', 'this', 'by', 'defualt', '.', 'verbose', ':', 'If', '1', 'the', 'source', 'code', 'of', 'the', 'generated', 'functions', 'will', 'be', 'output', 'to', 'stdout', 'at', 'various', 'stages', 'of', 'the', 'process', 'for', 'debugging', 'purposes', '.', 'If', '>', '1', 'all', 'intermediate', 'code', 'generation', 'steps', 'will', 'print', '.'] | train | https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/grad_util.py#L335-L388 |
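A small concrete sketch of the call pattern described in the docstring, using a simple cubic; it assumes Tangent can transform this plain-arithmetic function defined at module level.

```python
import tangent

def f(x):
    return x * x * x

df = tangent.grad(f)      # derivative with respect to the first argument
print(df(2.0))            # 12.0, i.e. 3 * 2.0 ** 2

# With preserve_result=True the primal value is returned alongside the gradient.
df_and_val = tangent.grad(f, preserve_result=True)
print(df_and_val(2.0))    # (12.0, 8.0)
```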
8,604 | ANTsX/ANTsPy | ants/core/ants_image_io.py | images_to_matrix | def images_to_matrix(image_list, mask=None, sigma=None, epsilon=0.5 ):
"""
Read images into rows of a matrix, given a mask - much faster for
large datasets as it is based on C++ implementations.
ANTsR function: `imagesToMatrix`
Arguments
---------
image_list : list of ANTsImage types
images to convert to ndarray
mask : ANTsImage (optional)
image containing binary mask. voxels in the mask are placed in the matrix
sigma : scalar (optional)
smoothing factor
epsilon : scalar
threshold for mask
Returns
-------
ndarray
array with a row for each image
shape = (N_IMAGES, N_VOXELS)
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data('r16'))
>>> img2 = ants.image_read(ants.get_ants_data('r16'))
>>> img3 = ants.image_read(ants.get_ants_data('r16'))
>>> mat = ants.image_list_to_matrix([img,img2,img3])
"""
def listfunc(x):
if np.sum(np.array(x.shape) - np.array(mask.shape)) != 0:
x = reg.resample_image_to_target(x, mask, 2)
return x[mask]
if mask is None:
mask = utils.get_mask(image_list[0])
num_images = len(image_list)
mask_arr = mask.numpy() >= epsilon
num_voxels = np.sum(mask_arr)
data_matrix = np.empty((num_images, num_voxels))
do_smooth = sigma is not None
for i,img in enumerate(image_list):
if do_smooth:
data_matrix[i, :] = listfunc(utils.smooth_image(img, sigma, sigma_in_physical_coordinates=True))
else:
data_matrix[i,:] = listfunc(img)
return data_matrix | python | def images_to_matrix(image_list, mask=None, sigma=None, epsilon=0.5 ):
"""
Read images into rows of a matrix, given a mask - much faster for
large datasets as it is based on C++ implementations.
ANTsR function: `imagesToMatrix`
Arguments
---------
image_list : list of ANTsImage types
images to convert to ndarray
mask : ANTsImage (optional)
image containing binary mask. voxels in the mask are placed in the matrix
sigma : scalar (optional)
smoothing factor
epsilon : scalar
threshold for mask
Returns
-------
ndarray
array with a row for each image
shape = (N_IMAGES, N_VOXELS)
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data('r16'))
>>> img2 = ants.image_read(ants.get_ants_data('r16'))
>>> img3 = ants.image_read(ants.get_ants_data('r16'))
>>> mat = ants.image_list_to_matrix([img,img2,img3])
"""
def listfunc(x):
if np.sum(np.array(x.shape) - np.array(mask.shape)) != 0:
x = reg.resample_image_to_target(x, mask, 2)
return x[mask]
if mask is None:
mask = utils.get_mask(image_list[0])
num_images = len(image_list)
mask_arr = mask.numpy() >= epsilon
num_voxels = np.sum(mask_arr)
data_matrix = np.empty((num_images, num_voxels))
do_smooth = sigma is not None
for i,img in enumerate(image_list):
if do_smooth:
data_matrix[i, :] = listfunc(utils.smooth_image(img, sigma, sigma_in_physical_coordinates=True))
else:
data_matrix[i,:] = listfunc(img)
return data_matrix | ['def', 'images_to_matrix', '(', 'image_list', ',', 'mask', '=', 'None', ',', 'sigma', '=', 'None', ',', 'epsilon', '=', '0.5', ')', ':', 'def', 'listfunc', '(', 'x', ')', ':', 'if', 'np', '.', 'sum', '(', 'np', '.', 'array', '(', 'x', '.', 'shape', ')', '-', 'np', '.', 'array', '(', 'mask', '.', 'shape', ')', ')', '!=', '0', ':', 'x', '=', 'reg', '.', 'resample_image_to_target', '(', 'x', ',', 'mask', ',', '2', ')', 'return', 'x', '[', 'mask', ']', 'if', 'mask', 'is', 'None', ':', 'mask', '=', 'utils', '.', 'get_mask', '(', 'image_list', '[', '0', ']', ')', 'num_images', '=', 'len', '(', 'image_list', ')', 'mask_arr', '=', 'mask', '.', 'numpy', '(', ')', '>=', 'epsilon', 'num_voxels', '=', 'np', '.', 'sum', '(', 'mask_arr', ')', 'data_matrix', '=', 'np', '.', 'empty', '(', '(', 'num_images', ',', 'num_voxels', ')', ')', 'do_smooth', '=', 'sigma', 'is', 'not', 'None', 'for', 'i', ',', 'img', 'in', 'enumerate', '(', 'image_list', ')', ':', 'if', 'do_smooth', ':', 'data_matrix', '[', 'i', ',', ':', ']', '=', 'listfunc', '(', 'utils', '.', 'smooth_image', '(', 'img', ',', 'sigma', ',', 'sigma_in_physical_coordinates', '=', 'True', ')', ')', 'else', ':', 'data_matrix', '[', 'i', ',', ':', ']', '=', 'listfunc', '(', 'img', ')', 'return', 'data_matrix'] | Read images into rows of a matrix, given a mask - much faster for
large datasets as it is based on C++ implementations.
ANTsR function: `imagesToMatrix`
Arguments
---------
image_list : list of ANTsImage types
images to convert to ndarray
mask : ANTsImage (optional)
image containing binary mask. voxels in the mask are placed in the matrix
sigma : scalar (optional)
smoothing factor
epsilon : scalar
threshold for mask
Returns
-------
ndarray
array with a row for each image
shape = (N_IMAGES, N_VOXELS)
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data('r16'))
>>> img2 = ants.image_read(ants.get_ants_data('r16'))
>>> img3 = ants.image_read(ants.get_ants_data('r16'))
>>> mat = ants.image_list_to_matrix([img,img2,img3]) | ['Read', 'images', 'into', 'rows', 'of', 'a', 'matrix', 'given', 'a', 'mask', '-', 'much', 'faster', 'for', 'large', 'datasets', 'as', 'it', 'is', 'based', 'on', 'C', '++', 'implementations', '.'] | train | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/core/ants_image_io.py#L247-L301 |
8,605 | soravux/scoop | examples/image_resize.py | resizeTile | def resizeTile(index, size):
"""Apply Antialiasing resizing to tile"""
resized = tiles[index].resize(size, Image.ANTIALIAS)
return sImage(resized.tostring(), resized.size, resized.mode) | python | def resizeTile(index, size):
"""Apply Antialiasing resizing to tile"""
resized = tiles[index].resize(size, Image.ANTIALIAS)
return sImage(resized.tostring(), resized.size, resized.mode) | ['def', 'resizeTile', '(', 'index', ',', 'size', ')', ':', 'resized', '=', 'tiles', '[', 'index', ']', '.', 'resize', '(', 'size', ',', 'Image', '.', 'ANTIALIAS', ')', 'return', 'sImage', '(', 'resized', '.', 'tostring', '(', ')', ',', 'resized', '.', 'size', ',', 'resized', '.', 'mode', ')'] | Apply Antialiasing resizing to tile | ['Apply', 'Antialiasing', 'resizing', 'to', 'tile'] | train | https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/examples/image_resize.py#L61-L64 |
8,606 | hydpy-dev/hydpy | hydpy/models/dam/dam_model.py | update_actualremoterelieve_v1 | def update_actualremoterelieve_v1(self):
"""Constrain the actual relieve discharge to a remote location.
Required control parameter:
|HighestRemoteDischarge|
Required derived parameter:
|HighestRemoteSmoothPar|
Updated flux sequence:
|ActualRemoteRelieve|
Basic equation - discontinuous:
:math:`ActualRemoteRelieve = min(ActualRemoteRelease,
HighestRemoteDischarge)`
Basic equation - continuous:
:math:`ActualRemoteRelieve = smooth_min1(ActualRemoteRelieve,
HighestRemoteDischarge, HighestRemoteSmoothPar)`
Used auxiliary methods:
|smooth_min1|
|smooth_max1|
Note that the given continuous basic equation is a simplification of
the complete algorithm to update |ActualRemoteRelieve|, which also
makes use of |smooth_max1| to prevent from gaining negative values
in a smooth manner.
Examples:
Prepare a dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
Prepare a test function object that performs eight examples with
|ActualRemoteRelieve| ranging from 0 to 8 m³/s and a fixed
initial value of parameter |HighestRemoteDischarge| of 4 m³/s:
>>> highestremotedischarge(4.0)
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.update_actualremoterelieve_v1,
... last_example=8,
... parseqs=(fluxes.actualremoterelieve,))
>>> test.nexts.actualremoterelieve = range(8)
Through setting the value of |HighestRemoteTolerance| to the
lowest possible value, there is no smoothing. Instead, the
shown relationship agrees with a combination of the discontinuous
minimum and maximum function:
>>> highestremotetolerance(0.0)
>>> derived.highestremotesmoothpar.update()
>>> test()
| ex. | actualremoterelieve |
-----------------------------
| 1 | 0.0 |
| 2 | 1.0 |
| 3 | 2.0 |
| 4 | 3.0 |
| 5 | 4.0 |
| 6 | 4.0 |
| 7 | 4.0 |
| 8 | 4.0 |
Setting a sensible |HighestRemoteTolerance| value results in
a moderate smoothing:
>>> highestremotetolerance(0.1)
>>> derived.highestremotesmoothpar.update()
>>> test()
| ex. | actualremoterelieve |
-----------------------------
| 1 | 0.0 |
| 2 | 0.999999 |
| 3 | 1.99995 |
| 4 | 2.996577 |
| 5 | 3.836069 |
| 6 | 3.991578 |
| 7 | 3.993418 |
| 8 | 3.993442 |
Method |update_actualremoterelieve_v1| is defined in a similar
way as method |calc_actualremoterelieve_v1|. Please read the
documentation on |calc_actualremoterelieve_v1| for further
information.
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
d_smooth = der.highestremotesmoothpar
d_highest = con.highestremotedischarge
d_value = smoothutils.smooth_min1(
flu.actualremoterelieve, d_highest, d_smooth)
for dummy in range(5):
d_smooth /= 5.
d_value = smoothutils.smooth_max1(
d_value, 0., d_smooth)
d_smooth /= 5.
d_value = smoothutils.smooth_min1(
d_value, d_highest, d_smooth)
d_value = min(d_value, flu.actualremoterelieve)
d_value = min(d_value, d_highest)
flu.actualremoterelieve = max(d_value, 0.) | python | def update_actualremoterelieve_v1(self):
"""Constrain the actual relieve discharge to a remote location.
Required control parameter:
|HighestRemoteDischarge|
Required derived parameter:
|HighestRemoteSmoothPar|
Updated flux sequence:
|ActualRemoteRelieve|
Basic equation - discontinuous:
:math:`ActualRemoteRelieve = min(ActualRemoteRelease,
HighestRemoteDischarge)`
Basic equation - continuous:
:math:`ActualRemoteRelieve = smooth_min1(ActualRemoteRelieve,
HighestRemoteDischarge, HighestRemoteSmoothPar)`
Used auxiliary methods:
|smooth_min1|
|smooth_max1|
Note that the given continuous basic equation is a simplification of
the complete algorithm to update |ActualRemoteRelieve|, which also
makes use of |smooth_max1| to prevent from gaining negative values
in a smooth manner.
Examples:
Prepare a dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
Prepare a test function object that performs eight examples with
|ActualRemoteRelieve| ranging from 0 to 8 m³/s and a fixed
initial value of parameter |HighestRemoteDischarge| of 4 m³/s:
>>> highestremotedischarge(4.0)
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.update_actualremoterelieve_v1,
... last_example=8,
... parseqs=(fluxes.actualremoterelieve,))
>>> test.nexts.actualremoterelieve = range(8)
Through setting the value of |HighestRemoteTolerance| to the
lowest possible value, there is no smoothing. Instead, the
shown relationship agrees with a combination of the discontinuous
minimum and maximum function:
>>> highestremotetolerance(0.0)
>>> derived.highestremotesmoothpar.update()
>>> test()
| ex. | actualremoterelieve |
-----------------------------
| 1 | 0.0 |
| 2 | 1.0 |
| 3 | 2.0 |
| 4 | 3.0 |
| 5 | 4.0 |
| 6 | 4.0 |
| 7 | 4.0 |
| 8 | 4.0 |
Setting a sensible |HighestRemoteTolerance| value results in
a moderate smoothing:
>>> highestremotetolerance(0.1)
>>> derived.highestremotesmoothpar.update()
>>> test()
| ex. | actualremoterelieve |
-----------------------------
| 1 | 0.0 |
| 2 | 0.999999 |
| 3 | 1.99995 |
| 4 | 2.996577 |
| 5 | 3.836069 |
| 6 | 3.991578 |
| 7 | 3.993418 |
| 8 | 3.993442 |
Method |update_actualremoterelieve_v1| is defined in a similar
way as method |calc_actualremoterelieve_v1|. Please read the
documentation on |calc_actualremoterelieve_v1| for further
information.
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
d_smooth = der.highestremotesmoothpar
d_highest = con.highestremotedischarge
d_value = smoothutils.smooth_min1(
flu.actualremoterelieve, d_highest, d_smooth)
for dummy in range(5):
d_smooth /= 5.
d_value = smoothutils.smooth_max1(
d_value, 0., d_smooth)
d_smooth /= 5.
d_value = smoothutils.smooth_min1(
d_value, d_highest, d_smooth)
d_value = min(d_value, flu.actualremoterelieve)
d_value = min(d_value, d_highest)
flu.actualremoterelieve = max(d_value, 0.) | ['def', 'update_actualremoterelieve_v1', '(', 'self', ')', ':', 'con', '=', 'self', '.', 'parameters', '.', 'control', '.', 'fastaccess', 'der', '=', 'self', '.', 'parameters', '.', 'derived', '.', 'fastaccess', 'flu', '=', 'self', '.', 'sequences', '.', 'fluxes', '.', 'fastaccess', 'd_smooth', '=', 'der', '.', 'highestremotesmoothpar', 'd_highest', '=', 'con', '.', 'highestremotedischarge', 'd_value', '=', 'smoothutils', '.', 'smooth_min1', '(', 'flu', '.', 'actualremoterelieve', ',', 'd_highest', ',', 'd_smooth', ')', 'for', 'dummy', 'in', 'range', '(', '5', ')', ':', 'd_smooth', '/=', '5.', 'd_value', '=', 'smoothutils', '.', 'smooth_max1', '(', 'd_value', ',', '0.', ',', 'd_smooth', ')', 'd_smooth', '/=', '5.', 'd_value', '=', 'smoothutils', '.', 'smooth_min1', '(', 'd_value', ',', 'd_highest', ',', 'd_smooth', ')', 'd_value', '=', 'min', '(', 'd_value', ',', 'flu', '.', 'actualremoterelieve', ')', 'd_value', '=', 'min', '(', 'd_value', ',', 'd_highest', ')', 'flu', '.', 'actualremoterelieve', '=', 'max', '(', 'd_value', ',', '0.', ')'] | Constrain the actual relieve discharge to a remote location.
Required control parameter:
|HighestRemoteDischarge|
Required derived parameter:
|HighestRemoteSmoothPar|
Updated flux sequence:
|ActualRemoteRelieve|
Basic equation - discontinuous:
:math:`ActualRemoteRelieve = min(ActualRemoteRelease,
HighestRemoteDischarge)`
Basic equation - continuous:
:math:`ActualRemoteRelieve = smooth_min1(ActualRemoteRelieve,
HighestRemoteDischarge, HighestRemoteSmoothPar)`
Used auxiliary methods:
|smooth_min1|
|smooth_max1|
Note that the given continuous basic equation is a simplification of
the complete algorithm to update |ActualRemoteRelieve|, which also
makes use of |smooth_max1| to prevent from gaining negative values
in a smooth manner.
Examples:
Prepare a dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
Prepare a test function object that performs eight examples with
|ActualRemoteRelieve| ranging from 0 to 8 m³/s and a fixed
initial value of parameter |HighestRemoteDischarge| of 4 m³/s:
>>> highestremotedischarge(4.0)
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.update_actualremoterelieve_v1,
... last_example=8,
... parseqs=(fluxes.actualremoterelieve,))
>>> test.nexts.actualremoterelieve = range(8)
Through setting the value of |HighestRemoteTolerance| to the
lowest possible value, there is no smoothing. Instead, the
shown relationship agrees with a combination of the discontinuous
minimum and maximum function:
>>> highestremotetolerance(0.0)
>>> derived.highestremotesmoothpar.update()
>>> test()
| ex. | actualremoterelieve |
-----------------------------
| 1 | 0.0 |
| 2 | 1.0 |
| 3 | 2.0 |
| 4 | 3.0 |
| 5 | 4.0 |
| 6 | 4.0 |
| 7 | 4.0 |
| 8 | 4.0 |
Setting a sensible |HighestRemoteTolerance| value results in
a moderate smoothing:
>>> highestremotetolerance(0.1)
>>> derived.highestremotesmoothpar.update()
>>> test()
| ex. | actualremoterelieve |
-----------------------------
| 1 | 0.0 |
| 2 | 0.999999 |
| 3 | 1.99995 |
| 4 | 2.996577 |
| 5 | 3.836069 |
| 6 | 3.991578 |
| 7 | 3.993418 |
| 8 | 3.993442 |
Method |update_actualremoterelieve_v1| is defined in a similar
way as method |calc_actualremoterelieve_v1|. Please read the
documentation on |calc_actualremoterelieve_v1| for further
information. | ['Constrain', 'the', 'actual', 'relieve', 'discharge', 'to', 'a', 'remote', 'location', '.'] | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L1765-L1869 |
8,607 | fprimex/zdesk | zdesk/zdesk_api.py | ZendeskAPI.stream_agents_list | def stream_agents_list(self, department_id=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/apis#get-all-agents-status"
api_path = "/stream/agents"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if department_id:
api_query.update({
"department_id": department_id,
})
return self.call(api_path, query=api_query, **kwargs) | python | def stream_agents_list(self, department_id=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/apis#get-all-agents-status"
api_path = "/stream/agents"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if department_id:
api_query.update({
"department_id": department_id,
})
return self.call(api_path, query=api_query, **kwargs) | ['def', 'stream_agents_list', '(', 'self', ',', 'department_id', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'api_path', '=', '"/stream/agents"', 'api_query', '=', '{', '}', 'if', '"query"', 'in', 'kwargs', '.', 'keys', '(', ')', ':', 'api_query', '.', 'update', '(', 'kwargs', '[', '"query"', ']', ')', 'del', 'kwargs', '[', '"query"', ']', 'if', 'department_id', ':', 'api_query', '.', 'update', '(', '{', '"department_id"', ':', 'department_id', ',', '}', ')', 'return', 'self', '.', 'call', '(', 'api_path', ',', 'query', '=', 'api_query', ',', '*', '*', 'kwargs', ')'] | https://developer.zendesk.com/rest_api/docs/chat/apis#get-all-agents-status | ['https', ':', '//', 'developer', '.', 'zendesk', '.', 'com', '/', 'rest_api', '/', 'docs', '/', 'chat', '/', 'apis#get', '-', 'all', '-', 'agents', '-', 'status'] | train | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3191-L3202 |
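A usage sketch for the generated binding above. The subdomain, e-mail, and token are placeholders, and it assumes the account can reach the Chat/real-time stream endpoints.

```python
from zdesk import Zendesk

# Authenticate with an API token (the final True marks the password as a token).
zd = Zendesk('https://example.zendesk.com', 'agent@example.com', 'API_TOKEN', True)

# Current status of all agents, optionally filtered to one department.
status = zd.stream_agents_list(department_id=42)
print(status)
```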
8,608 | RedFantom/ttkwidgets | ttkwidgets/timeline.py | TimeLine._time_marker_move | def _time_marker_move(self, event):
"""Callback for <B1-Motion> Event: Move the selected marker"""
limit = self.pixel_width
x = self._canvas_ticks.canvasx(event.x)
x = min(max(x, 0), limit)
_, y = self._canvas_ticks.coords(self._time_marker_image)
self._canvas_ticks.coords(self._time_marker_image, x, y)
self._timeline.coords(self._time_marker_line, x, 0, x, self._timeline.winfo_height())
self._time_show() | python | def _time_marker_move(self, event):
"""Callback for <B1-Motion> Event: Move the selected marker"""
limit = self.pixel_width
x = self._canvas_ticks.canvasx(event.x)
x = min(max(x, 0), limit)
_, y = self._canvas_ticks.coords(self._time_marker_image)
self._canvas_ticks.coords(self._time_marker_image, x, y)
self._timeline.coords(self._time_marker_line, x, 0, x, self._timeline.winfo_height())
self._time_show() | ['def', '_time_marker_move', '(', 'self', ',', 'event', ')', ':', 'limit', '=', 'self', '.', 'pixel_width', 'x', '=', 'self', '.', '_canvas_ticks', '.', 'canvasx', '(', 'event', '.', 'x', ')', 'x', '=', 'min', '(', 'max', '(', 'x', ',', '0', ')', ',', 'limit', ')', '_', ',', 'y', '=', 'self', '.', '_canvas_ticks', '.', 'coords', '(', 'self', '.', '_time_marker_image', ')', 'self', '.', '_canvas_ticks', '.', 'coords', '(', 'self', '.', '_time_marker_image', ',', 'x', ',', 'y', ')', 'self', '.', '_timeline', '.', 'coords', '(', 'self', '.', '_time_marker_line', ',', 'x', ',', '0', ',', 'x', ',', 'self', '.', '_timeline', '.', 'winfo_height', '(', ')', ')', 'self', '.', '_time_show', '(', ')'] | Callback for <B1-Motion> Event: Move the selected marker | ['Callback', 'for', '<B1', '-', 'Motion', '>', 'Event', ':', 'Move', 'the', 'selected', 'marker'] | train | https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/timeline.py#L649-L657 |
8,609 | bxlab/bx-python | lib/bx/align/maf.py | parse_attributes | def parse_attributes( fields ):
"""Parse list of key=value strings into a dict"""
attributes = {}
for field in fields:
pair = field.split( '=' )
attributes[ pair[0] ] = pair[1]
return attributes | python | def parse_attributes( fields ):
"""Parse list of key=value strings into a dict"""
attributes = {}
for field in fields:
pair = field.split( '=' )
attributes[ pair[0] ] = pair[1]
return attributes | ['def', 'parse_attributes', '(', 'fields', ')', ':', 'attributes', '=', '{', '}', 'for', 'field', 'in', 'fields', ':', 'pair', '=', 'field', '.', 'split', '(', "'='", ')', 'attributes', '[', 'pair', '[', '0', ']', ']', '=', 'pair', '[', '1', ']', 'return', 'attributes'] | Parse list of key=value strings into a dict | ['Parse', 'list', 'of', 'key', '=', 'value', 'strings', 'into', 'a', 'dict'] | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/maf.py#L212-L218 |
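For clarity, a tiny example of the key=value parsing performed above; the field values are made up.

```python
# Each "key=value" field becomes one dictionary entry.
attrs = parse_attributes(['scoring=autoMZ', 'version=1'])
assert attrs == {'scoring': 'autoMZ', 'version': '1'}
```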
8,610 | carta/ldap_tools | src/ldap_tools/client.py | Client.get_max_id | def get_max_id(self, object_type, role):
"""Get the highest used ID."""
if object_type == 'user':
objectclass = 'posixAccount'
ldap_attr = 'uidNumber'
elif object_type == 'group': # pragma: no cover
objectclass = 'posixGroup'
ldap_attr = 'gidNumber'
else:
raise ldap_tools.exceptions.InvalidResult('Unknown object type')
minID, maxID = Client.__set_id_boundary(role)
filter = [
"(objectclass={})".format(objectclass), "({}>={})".format(ldap_attr, minID)
]
if maxID is not None:
filter.append("({}<={})".format(ldap_attr, maxID))
id_list = self.search(filter, [ldap_attr])
if id_list == []:
id = minID
else:
if object_type == 'user':
id = max([i.uidNumber.value for i in id_list]) + 1
elif object_type == 'group':
id = max([i.gidNumber.value for i in id_list]) + 1
else:
raise ldap_tools.exceptions.InvalidResult('Unknown object')
return id | python | def get_max_id(self, object_type, role):
"""Get the highest used ID."""
if object_type == 'user':
objectclass = 'posixAccount'
ldap_attr = 'uidNumber'
elif object_type == 'group': # pragma: no cover
objectclass = 'posixGroup'
ldap_attr = 'gidNumber'
else:
raise ldap_tools.exceptions.InvalidResult('Unknown object type')
minID, maxID = Client.__set_id_boundary(role)
filter = [
"(objectclass={})".format(objectclass), "({}>={})".format(ldap_attr, minID)
]
if maxID is not None:
filter.append("({}<={})".format(ldap_attr, maxID))
id_list = self.search(filter, [ldap_attr])
if id_list == []:
id = minID
else:
if object_type == 'user':
id = max([i.uidNumber.value for i in id_list]) + 1
elif object_type == 'group':
id = max([i.gidNumber.value for i in id_list]) + 1
else:
raise ldap_tools.exceptions.InvalidResult('Unknown object')
return id | ['def', 'get_max_id', '(', 'self', ',', 'object_type', ',', 'role', ')', ':', 'if', 'object_type', '==', "'user'", ':', 'objectclass', '=', "'posixAccount'", 'ldap_attr', '=', "'uidNumber'", 'elif', 'object_type', '==', "'group'", ':', '# pragma: no cover', 'objectclass', '=', "'posixGroup'", 'ldap_attr', '=', "'gidNumber'", 'else', ':', 'raise', 'ldap_tools', '.', 'exceptions', '.', 'InvalidResult', '(', "'Unknown object type'", ')', 'minID', ',', 'maxID', '=', 'Client', '.', '__set_id_boundary', '(', 'role', ')', 'filter', '=', '[', '"(objectclass={})"', '.', 'format', '(', 'objectclass', ')', ',', '"({}>={})"', '.', 'format', '(', 'ldap_attr', ',', 'minID', ')', ']', 'if', 'maxID', 'is', 'not', 'None', ':', 'filter', '.', 'append', '(', '"({}<={})"', '.', 'format', '(', 'ldap_attr', ',', 'maxID', ')', ')', 'id_list', '=', 'self', '.', 'search', '(', 'filter', ',', '[', 'ldap_attr', ']', ')', 'if', 'id_list', '==', '[', ']', ':', 'id', '=', 'minID', 'else', ':', 'if', 'object_type', '==', "'user'", ':', 'id', '=', 'max', '(', '[', 'i', '.', 'uidNumber', '.', 'value', 'for', 'i', 'in', 'id_list', ']', ')', '+', '1', 'elif', 'object_type', '==', "'group'", ':', 'id', '=', 'max', '(', '[', 'i', '.', 'gidNumber', '.', 'value', 'for', 'i', 'in', 'id_list', ']', ')', '+', '1', 'else', ':', 'raise', 'ldap_tools', '.', 'exceptions', '.', 'InvalidResult', '(', "'Unknown object'", ')', 'return', 'id'] | Get the highest used ID. | ['Get', 'the', 'highest', 'used', 'ID', '.'] | train | https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/client.py#L110-L142 |
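A pure-logic sketch of the allocation rule implemented above, separated from the LDAP plumbing: the next id is the highest in-range id plus one, or the role's minimum when nothing exists yet. The boundary values are illustrative, not the ones `__set_id_boundary` actually returns.

```python
def next_free_id(existing_ids, min_id, max_id=None):
    # Mirror of get_max_id's logic over a plain list of numeric ids.
    in_range = [i for i in existing_ids
                if i >= min_id and (max_id is None or i <= max_id)]
    return max(in_range) + 1 if in_range else min_id

assert next_free_id([], min_id=10000) == 10000
assert next_free_id([10000, 10003], min_id=10000, max_id=19999) == 10004
```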
8,611 | hozn/stravalib | stravalib/client.py | Client.get_activity_comments | def get_activity_comments(self, activity_id, markdown=False, limit=None):
"""
Gets the comments for an activity.
http://strava.github.io/api/v3/comments/#list
:param activity_id: The activity for which to fetch comments.
:type activity_id: int
:param markdown: Whether to include markdown in comments (default is false, i.e. markdown is filtered out).
:type markdown: bool
:param limit: Max rows to return (default unlimited).
:type limit: int
:return: An iterator of :class:`stravalib.model.ActivityComment` objects.
:rtype: :class:`BatchedResultsIterator`
"""
result_fetcher = functools.partial(self.protocol.get, '/activities/{id}/comments',
id=activity_id, markdown=int(markdown))
return BatchedResultsIterator(entity=model.ActivityComment,
bind_client=self,
result_fetcher=result_fetcher,
limit=limit) | python | def get_activity_comments(self, activity_id, markdown=False, limit=None):
"""
Gets the comments for an activity.
http://strava.github.io/api/v3/comments/#list
:param activity_id: The activity for which to fetch comments.
:type activity_id: int
:param markdown: Whether to include markdown in comments (default is false, i.e. markdown is filtered out).
:type markdown: bool
:param limit: Max rows to return (default unlimited).
:type limit: int
:return: An iterator of :class:`stravalib.model.ActivityComment` objects.
:rtype: :class:`BatchedResultsIterator`
"""
result_fetcher = functools.partial(self.protocol.get, '/activities/{id}/comments',
id=activity_id, markdown=int(markdown))
return BatchedResultsIterator(entity=model.ActivityComment,
bind_client=self,
result_fetcher=result_fetcher,
limit=limit) | ['def', 'get_activity_comments', '(', 'self', ',', 'activity_id', ',', 'markdown', '=', 'False', ',', 'limit', '=', 'None', ')', ':', 'result_fetcher', '=', 'functools', '.', 'partial', '(', 'self', '.', 'protocol', '.', 'get', ',', "'/activities/{id}/comments'", ',', 'id', '=', 'activity_id', ',', 'markdown', '=', 'int', '(', 'markdown', ')', ')', 'return', 'BatchedResultsIterator', '(', 'entity', '=', 'model', '.', 'ActivityComment', ',', 'bind_client', '=', 'self', ',', 'result_fetcher', '=', 'result_fetcher', ',', 'limit', '=', 'limit', ')'] | Gets the comments for an activity.
http://strava.github.io/api/v3/comments/#list
:param activity_id: The activity for which to fetch comments.
:type activity_id: int
:param markdown: Whether to include markdown in comments (default is false/filterout).
:type markdown: bool
:param limit: Max rows to return (default unlimited).
:type limit: int
:return: An iterator of :class:`stravalib.model.ActivityComment` objects.
:rtype: :class:`BatchedResultsIterator` | ['Gets', 'the', 'comments', 'for', 'an', 'activity', '.'] | train | https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/client.py#L742-L766 |
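A short usage sketch for the comments iterator above; the access token and activity id are placeholders.

```python
from stravalib.client import Client

client = Client(access_token='MY_ACCESS_TOKEN')

# Pages of comments are fetched lazily; limit caps the total returned.
for comment in client.get_activity_comments(activity_id=1234567890, limit=10):
    print(comment.text)
```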
8,612 | SeabornGames/RequestClient | seaborn/request_client/repr_wrapper.py | ReprListList.append | def append(self, obj):
"""
If it is a list it will append the obj, if it is a dictionary
it will convert it to a list and append
:param obj: dict or list of the object to append
:return: None
"""
if isinstance(obj, dict) and self._col_names:
obj = [obj.get(col, None) for col in self._col_names]
assert isinstance(obj, list), \
"obj appended to ReprListList needs to be a list or dict"
self._original.append(obj) | python | def append(self, obj):
"""
If it is a list it will append the obj, if it is a dictionary
it will convert it to a list and append
:param obj: dict or list of the object to append
:return: None
"""
if isinstance(obj, dict) and self._col_names:
obj = [obj.get(col, None) for col in self._col_names]
assert isinstance(obj, list), \
"obj appended to ReprListList needs to be a list or dict"
self._original.append(obj) | ['def', 'append', '(', 'self', ',', 'obj', ')', ':', 'if', 'isinstance', '(', 'obj', ',', 'dict', ')', 'and', 'self', '.', '_col_names', ':', 'obj', '=', '[', 'obj', '.', 'get', '(', 'col', ',', 'None', ')', 'for', 'col', 'in', 'self', '.', '_col_names', ']', 'assert', 'isinstance', '(', 'obj', ',', 'list', ')', ',', '"obj appended to ReprListList needs to be a list or dict"', 'self', '.', '_original', '.', 'append', '(', 'obj', ')'] | If it is a list it will append the obj, if it is a dictionary
it will convert it to a list and append
:param obj: dict or list of the object to append
:return: None | ['If', 'it', 'is', 'a', 'list', 'it', 'will', 'append', 'the', 'obj', 'if', 'it', 'is', 'a', 'dictionary', 'it', 'will', 'convert', 'it', 'to', 'a', 'list', 'and', 'append', ':', 'param', 'obj', ':', 'dict', 'or', 'list', 'of', 'the', 'object', 'to', 'append', ':', 'return', ':', 'None'] | train | https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/repr_wrapper.py#L271-L282 |
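A pure-logic sketch of the dict-to-row conversion that `append` performs when column names are set; the column names and row values are made up.

```python
col_names = ['id', 'name', 'status']
row_as_dict = {'name': 'job-1', 'id': 7}

# Ordering follows col_names, and missing columns become None.
row_as_list = [row_as_dict.get(col, None) for col in col_names]
assert row_as_list == [7, 'job-1', None]
```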
8,613 | AntagonistHQ/openprovider.py | openprovider/modules/ssl.py | SSLModule.retrieve_product | def retrieve_product(self, product_id):
"""Retrieve details on a single product."""
response = self.request(E.retrieveProductSslCertRequest(
E.id(product_id)
))
return response.as_model(SSLProduct) | python | def retrieve_product(self, product_id):
"""Retrieve details on a single product."""
response = self.request(E.retrieveProductSslCertRequest(
E.id(product_id)
))
return response.as_model(SSLProduct) | ['def', 'retrieve_product', '(', 'self', ',', 'product_id', ')', ':', 'response', '=', 'self', '.', 'request', '(', 'E', '.', 'retrieveProductSslCertRequest', '(', 'E', '.', 'id', '(', 'product_id', ')', ')', ')', 'return', 'response', '.', 'as_model', '(', 'SSLProduct', ')'] | Retrieve details on a single product. | ['Retrieve', 'details', 'on', 'a', 'single', 'product', '.'] | train | https://github.com/AntagonistHQ/openprovider.py/blob/5871c3d5b3661e23667f147f49f20389c817a0a4/openprovider/modules/ssl.py#L36-L43 |
8,614 | Ingener74/Silly-Crossbow | SillyCrossbow/SillyCrossbow/crop.py | crop_image | def crop_image(image, threshold):
"""
Find the non-transparent region of the image and crop it out.
:param image: the image
:param threshold: transparency threshold used for cropping
:return: cropped_image - the cropped image
x, y, width, height - position and size of the cropped rectangle
"""
cropper = CropTransparent(image.width(), image.height(), threshold, str(image.constBits()))
x = cropper.getCroppedOffsetX()
y = cropper.getCroppedOffsetY()
width = cropper.getCroppedWidth()
height = cropper.getCroppedHeight()
cropped_image = image.copy(x, y, width, height)
return cropped_image, x, y, width, height | python | def crop_image(image, threshold):
"""
Find the non-transparent region of the image and crop it out.
:param image: the image
:param threshold: transparency threshold used for cropping
:return: cropped_image - the cropped image
x, y, width, height - position and size of the cropped rectangle
"""
cropper = CropTransparent(image.width(), image.height(), threshold, str(image.constBits()))
x = cropper.getCroppedOffsetX()
y = cropper.getCroppedOffsetY()
width = cropper.getCroppedWidth()
height = cropper.getCroppedHeight()
cropped_image = image.copy(x, y, width, height)
return cropped_image, x, y, width, height | ['def', 'crop_image', '(', 'image', ',', 'threshold', ')', ':', 'cropper', '=', 'CropTransparent', '(', 'image', '.', 'width', '(', ')', ',', 'image', '.', 'height', '(', ')', ',', 'threshold', ',', 'str', '(', 'image', '.', 'constBits', '(', ')', ')', ')', 'x', '=', 'cropper', '.', 'getCroppedOffsetX', '(', ')', 'y', '=', 'cropper', '.', 'getCroppedOffsetY', '(', ')', 'width', '=', 'cropper', '.', 'getCroppedWidth', '(', ')', 'height', '=', 'cropper', '.', 'getCroppedHeight', '(', ')', 'cropped_image', '=', 'image', '.', 'copy', '(', 'x', ',', 'y', ',', 'width', ',', 'height', ')', 'return', 'cropped_image', ',', 'x', ',', 'y', ',', 'width', ',', 'height'] | Find the non-transparent region of the image and crop it out.
:param image: the image
:param threshold: transparency threshold used for cropping
:return: cropped_image - the cropped image
x, y, width, height - position and size of the cropped rectangle | ['Найти', 'непрозрачную', 'область', 'на', 'изображении', 'и', 'вырезать', 'её'] | train | https://github.com/Ingener74/Silly-Crossbow/blob/ac70265f885ea6bdcea04c737b0f8528be023263/SillyCrossbow/SillyCrossbow/crop.py#L19-L35 |
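A usage sketch for `crop_image`, assuming a PySide/Qt4 environment (which this project targets) and an image file with an alpha channel; the file names are placeholders.

```python
from PySide.QtGui import QImage

image = QImage('sprite.png')                       # load an RGBA sprite
cropped, x, y, w, h = crop_image(image, threshold=50)
print('opaque region at', x, y, 'size', w, h)
cropped.save('sprite_cropped.png')
```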
8,615 | MAVENSDC/PyTplot | pytplot/tplot_restore.py | tplot_restore | def tplot_restore(filename):
"""
This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
>>> pytplot.restore('C:/temp/variable1.pytplot')
"""
#Error check
if not (os.path.isfile(filename)):
print("Not a valid file name")
return
#Check if the restored file was an IDL file
if filename.endswith('.tplot'):
temp_tplot = readsav(filename)
for i in range(len(temp_tplot['dq'])):
data_name = temp_tplot['dq'][i][0].decode("utf-8")
temp_x_data = temp_tplot['dq'][i][1][0][0]
#Pandas reads in data the other way I guess
if len(temp_tplot['dq'][i][1][0][2].shape) == 2:
temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2])
else:
temp_y_data = temp_tplot['dq'][i][1][0][2]
#If there are more than 4 fields, that means it is a spectrogram
if len(temp_tplot['dq'][i][1][0]) > 4:
temp_v_data = temp_tplot['dq'][i][1][0][4]
#Change from little endian to big endian, since pandas apparently hates little endian
#We might want to move this into the store_data procedure eventually
if (temp_x_data.dtype.byteorder == '>'):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == '>'):
temp_y_data = temp_y_data.byteswap().newbyteorder()
if (temp_v_data.dtype.byteorder == '>'):
temp_v_data = temp_v_data.byteswap().newbyteorder()
store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data, 'v':temp_v_data})
else:
#Change from little endian to big endian, since pandas apparently hates little endian
#We might want to move this into the store_data procedure eventually
if (temp_x_data.dtype.byteorder == '>'):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == '>'):
temp_y_data = temp_y_data.byteswap().newbyteorder()
store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data})
if temp_tplot['dq'][i][3].dtype.names is not None:
for option_name in temp_tplot['dq'][i][3].dtype.names:
options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0])
data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist()
data_quants[data_name].dtype = temp_tplot['dq'][i][5]
data_quants[data_name].create_time = temp_tplot['dq'][i][6]
for option_name in temp_tplot['tv'][0][0].dtype.names:
if option_name == 'TRANGE':
tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0])
if option_name == 'WSIZE':
tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0])
if option_name == 'VAR_LABEL':
tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0])
if 'P' in temp_tplot['tv'][0][1].tolist():
for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names:
if option_name == 'TITLE':
tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0])
#temp_tplot['tv'][0][1] is all of the "settings" variables
#temp_tplot['tv'][0][1]['D'][0] is "device" options
#temp_tplot['tv'][0][1]['P'][0] is "plot" options
#temp_tplot['tv'][0][1]['X'][0] is x axis options
#temp_tplot['tv'][0][1]['Y'][0] is y axis options
####################################################################
else:
temp = pickle.load(open(filename,"rb"))
num_data_quants = temp[0]
for i in range(0, num_data_quants):
data_quants[temp[i+1].name] = temp[i+1]
tplot_opt_glob = temp[num_data_quants+1]
return | python | def tplot_restore(filename):
"""
This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
>>> pytplot.restore('C:/temp/variable1.pytplot')
"""
#Error check
if not (os.path.isfile(filename)):
print("Not a valid file name")
return
#Check if the restored file was an IDL file
if filename.endswith('.tplot'):
temp_tplot = readsav(filename)
for i in range(len(temp_tplot['dq'])):
data_name = temp_tplot['dq'][i][0].decode("utf-8")
temp_x_data = temp_tplot['dq'][i][1][0][0]
#Pandas reads in data the other way I guess
if len(temp_tplot['dq'][i][1][0][2].shape) == 2:
temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2])
else:
temp_y_data = temp_tplot['dq'][i][1][0][2]
#If there are more than 4 fields, that means it is a spectrogram
if len(temp_tplot['dq'][i][1][0]) > 4:
temp_v_data = temp_tplot['dq'][i][1][0][4]
#Change from little endian to big endian, since pandas apparently hates little endian
#We might want to move this into the store_data procedure eventually
if (temp_x_data.dtype.byteorder == '>'):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == '>'):
temp_y_data = temp_y_data.byteswap().newbyteorder()
if (temp_v_data.dtype.byteorder == '>'):
temp_v_data = temp_v_data.byteswap().newbyteorder()
store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data, 'v':temp_v_data})
else:
#Change from little endian to big endian, since pandas apparently hates little endian
#We might want to move this into the store_data procedure eventually
if (temp_x_data.dtype.byteorder == '>'):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == '>'):
temp_y_data = temp_y_data.byteswap().newbyteorder()
store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data})
if temp_tplot['dq'][i][3].dtype.names is not None:
for option_name in temp_tplot['dq'][i][3].dtype.names:
options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0])
data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist()
data_quants[data_name].dtype = temp_tplot['dq'][i][5]
data_quants[data_name].create_time = temp_tplot['dq'][i][6]
for option_name in temp_tplot['tv'][0][0].dtype.names:
if option_name == 'TRANGE':
tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0])
if option_name == 'WSIZE':
tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0])
if option_name == 'VAR_LABEL':
tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0])
if 'P' in temp_tplot['tv'][0][1].tolist():
for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names:
if option_name == 'TITLE':
tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0])
#temp_tplot['tv'][0][1] is all of the "settings" variables
#temp_tplot['tv'][0][1]['D'][0] is "device" options
#temp_tplot['tv'][0][1]['P'][0] is "plot" options
#temp_tplot['tv'][0][1]['X'][0] is x axis options
#temp_tplot['tv'][0][1]['Y'][0] is y axis options
####################################################################
else:
temp = pickle.load(open(filename,"rb"))
num_data_quants = temp[0]
for i in range(0, num_data_quants):
data_quants[temp[i+1].name] = temp[i+1]
tplot_opt_glob = temp[num_data_quants+1]
return | ['def', 'tplot_restore', '(', 'filename', ')', ':', '#Error check', 'if', 'not', '(', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ')', ':', 'print', '(', '"Not a valid file name"', ')', 'return', '#Check if the restored file was an IDL file', 'if', 'filename', '.', 'endswith', '(', "'.tplot'", ')', ':', 'temp_tplot', '=', 'readsav', '(', 'filename', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'temp_tplot', '[', "'dq'", ']', ')', ')', ':', 'data_name', '=', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '0', ']', '.', 'decode', '(', '"utf-8"', ')', 'temp_x_data', '=', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '1', ']', '[', '0', ']', '[', '0', ']', '#Pandas reads in data the other way I guess', 'if', 'len', '(', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '1', ']', '[', '0', ']', '[', '2', ']', '.', 'shape', ')', '==', '2', ':', 'temp_y_data', '=', 'np', '.', 'transpose', '(', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '1', ']', '[', '0', ']', '[', '2', ']', ')', 'else', ':', 'temp_y_data', '=', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '1', ']', '[', '0', ']', '[', '2', ']', '#If there are more than 4 fields, that means it is a spectrogram ', 'if', 'len', '(', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '1', ']', '[', '0', ']', ')', '>', '4', ':', 'temp_v_data', '=', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '1', ']', '[', '0', ']', '[', '4', ']', '#Change from little endian to big endian, since pandas apparently hates little endian', '#We might want to move this into the store_data procedure eventually', 'if', '(', 'temp_x_data', '.', 'dtype', '.', 'byteorder', '==', "'>'", ')', ':', 'temp_x_data', '=', 'temp_x_data', '.', 'byteswap', '(', ')', '.', 'newbyteorder', '(', ')', 'if', '(', 'temp_y_data', '.', 'dtype', '.', 'byteorder', '==', "'>'", ')', ':', 'temp_y_data', '=', 'temp_y_data', '.', 'byteswap', '(', ')', '.', 'newbyteorder', '(', ')', 'if', '(', 'temp_v_data', '.', 'dtype', '.', 'byteorder', '==', "'>'", ')', ':', 'temp_v_data', '=', 'temp_v_data', '.', 'byteswap', '(', ')', '.', 'newbyteorder', '(', ')', 'store_data', '(', 'data_name', ',', 'data', '=', '{', "'x'", ':', 'temp_x_data', ',', "'y'", ':', 'temp_y_data', ',', "'v'", ':', 'temp_v_data', '}', ')', 'else', ':', '#Change from little endian to big endian, since pandas apparently hates little endian', '#We might want to move this into the store_data procedure eventually', 'if', '(', 'temp_x_data', '.', 'dtype', '.', 'byteorder', '==', "'>'", ')', ':', 'temp_x_data', '=', 'temp_x_data', '.', 'byteswap', '(', ')', '.', 'newbyteorder', '(', ')', 'if', '(', 'temp_y_data', '.', 'dtype', '.', 'byteorder', '==', "'>'", ')', ':', 'temp_y_data', '=', 'temp_y_data', '.', 'byteswap', '(', ')', '.', 'newbyteorder', '(', ')', 'store_data', '(', 'data_name', ',', 'data', '=', '{', "'x'", ':', 'temp_x_data', ',', "'y'", ':', 'temp_y_data', '}', ')', 'if', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '3', ']', '.', 'dtype', '.', 'names', 'is', 'not', 'None', ':', 'for', 'option_name', 'in', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '3', ']', '.', 'dtype', '.', 'names', ':', 'options', '(', 'data_name', ',', 'option_name', ',', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '3', ']', '[', 'option_name', ']', '[', '0', ']', ')', 'data_quants', '[', 'data_name', ']', '.', 'trange', '=', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '4', ']', '.', 'tolist', '(', ')', 'data_quants', '[', 'data_name', ']', '.', 'dtype', '=', 
'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '5', ']', 'data_quants', '[', 'data_name', ']', '.', 'create_time', '=', 'temp_tplot', '[', "'dq'", ']', '[', 'i', ']', '[', '6', ']', 'for', 'option_name', 'in', 'temp_tplot', '[', "'tv'", ']', '[', '0', ']', '[', '0', ']', '.', 'dtype', '.', 'names', ':', 'if', 'option_name', '==', "'TRANGE'", ':', 'tplot_options', '(', "'x_range'", ',', 'temp_tplot', '[', "'tv'", ']', '[', '0', ']', '[', '0', ']', '[', 'option_name', ']', '[', '0', ']', ')', 'if', 'option_name', '==', "'WSIZE'", ':', 'tplot_options', '(', "'wsize'", ',', 'temp_tplot', '[', "'tv'", ']', '[', '0', ']', '[', '0', ']', '[', 'option_name', ']', '[', '0', ']', ')', 'if', 'option_name', '==', "'VAR_LABEL'", ':', 'tplot_options', '(', "'var_label'", ',', 'temp_tplot', '[', "'tv'", ']', '[', '0', ']', '[', '0', ']', '[', 'option_name', ']', '[', '0', ']', ')', 'if', "'P'", 'in', 'temp_tplot', '[', "'tv'", ']', '[', '0', ']', '[', '1', ']', '.', 'tolist', '(', ')', ':', 'for', 'option_name', 'in', 'temp_tplot', '[', "'tv'", ']', '[', '0', ']', '[', '1', ']', '[', "'P'", ']', '[', '0', ']', '.', 'dtype', '.', 'names', ':', 'if', 'option_name', '==', "'TITLE'", ':', 'tplot_options', '(', "'title'", ',', 'temp_tplot', '[', "'tv'", ']', '[', '0', ']', '[', '1', ']', '[', "'P'", ']', '[', '0', ']', '[', 'option_name', ']', '[', '0', ']', ')', '#temp_tplot[\'tv\'][0][1] is all of the "settings" variables', '#temp_tplot[\'tv\'][0][1][\'D\'][0] is "device" options', '#temp_tplot[\'tv\'][0][1][\'P\'][0] is "plot" options', "#temp_tplot['tv'][0][1]['X'][0] is x axis options", "#temp_tplot['tv'][0][1]['Y'][0] is y axis options", '####################################################################', 'else', ':', 'temp', '=', 'pickle', '.', 'load', '(', 'open', '(', 'filename', ',', '"rb"', ')', ')', 'num_data_quants', '=', 'temp', '[', '0', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'num_data_quants', ')', ':', 'data_quants', '[', 'temp', '[', 'i', '+', '1', ']', '.', 'name', ']', '=', 'temp', '[', 'i', '+', '1', ']', 'tplot_opt_glob', '=', 'temp', '[', 'num_data_quants', '+', '1', ']', 'return'] | This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
>>> pytplot.restore('C:/temp/variable1.pytplot') | ['This', 'function', 'will', 'restore', 'tplot', 'variables', 'that', 'have', 'been', 'saved', 'with', 'the', 'tplot_save', 'command', '.', '..', 'note', '::', 'This', 'function', 'is', 'compatible', 'with', 'the', 'IDL', 'tplot_save', 'routine', '.', 'If', 'you', 'have', 'a', '.', 'tplot', 'file', 'generated', 'from', 'IDL', 'this', 'procedure', 'will', 'restore', 'the', 'data', 'contained', 'in', 'the', 'file', '.', 'Not', 'all', 'plot', 'options', 'will', 'transfer', 'over', 'at', 'this', 'time', '.', 'Parameters', ':', 'filename', ':', 'str', 'The', 'file', 'name', 'and', 'full', 'path', 'generated', 'by', 'the', 'tplot_save', 'command', '.', 'Returns', ':', 'None', 'Examples', ':', '>>>', '#', 'Restore', 'the', 'saved', 'data', 'from', 'the', 'tplot_save', 'example', '>>>', 'import', 'pytplot', '>>>', 'pytplot', '.', 'restore', '(', 'C', ':', '/', 'temp', '/', 'variable1', '.', 'pytplot', ')'] | train | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot_restore.py#L16-L114 |
8,616 | ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_serial.py | SerialModule.cmd_serial | def cmd_serial(self, args):
'''serial control commands'''
usage = "Usage: serial <lock|unlock|set|send>"
if len(args) < 1:
print(usage)
return
if args[0] == "lock":
self.serial_lock(True)
elif args[0] == "unlock":
self.serial_lock(False)
elif args[0] == "set":
self.serial_settings.command(args[1:])
elif args[0] == "send":
self.serial_send(args[1:])
else:
print(usage) | python | def cmd_serial(self, args):
'''serial control commands'''
usage = "Usage: serial <lock|unlock|set|send>"
if len(args) < 1:
print(usage)
return
if args[0] == "lock":
self.serial_lock(True)
elif args[0] == "unlock":
self.serial_lock(False)
elif args[0] == "set":
self.serial_settings.command(args[1:])
elif args[0] == "send":
self.serial_send(args[1:])
else:
print(usage) | ['def', 'cmd_serial', '(', 'self', ',', 'args', ')', ':', 'usage', '=', '"Usage: serial <lock|unlock|set|send>"', 'if', 'len', '(', 'args', ')', '<', '1', ':', 'print', '(', 'usage', ')', 'return', 'if', 'args', '[', '0', ']', '==', '"lock"', ':', 'self', '.', 'serial_lock', '(', 'True', ')', 'elif', 'args', '[', '0', ']', '==', '"unlock"', ':', 'self', '.', 'serial_lock', '(', 'False', ')', 'elif', 'args', '[', '0', ']', '==', '"set"', ':', 'self', '.', 'serial_settings', '.', 'command', '(', 'args', '[', '1', ':', ']', ')', 'elif', 'args', '[', '0', ']', '==', '"send"', ':', 'self', '.', 'serial_send', '(', 'args', '[', '1', ':', ']', ')', 'else', ':', 'print', '(', 'usage', ')'] | serial control commands | ['serial', 'control', 'commands'] | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_serial.py#L67-L82 |
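A short usage sketch for the entry above; `module` stands for a hypothetical instantiated SerialModule, and the argument lists mirror the usage string in the code:
>>> module.cmd_serial(["lock"])      # calls serial_lock(True)
>>> module.cmd_serial(["unlock"])    # calls serial_lock(False)
>>> module.cmd_serial([])            # too few arguments, prints the usage string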
8,617 | pantsbuild/pants | src/python/pants/base/build_file.py | BuildFile.scan_build_files | def scan_build_files(project_tree, base_relpath, build_ignore_patterns=None):
"""Looks for all BUILD files
:param project_tree: Project tree to scan in.
:type project_tree: :class:`pants.base.project_tree.ProjectTree`
:param base_relpath: Directory under root_dir to scan.
:param build_ignore_patterns: .gitignore like patterns to exclude from BUILD files scan.
:type build_ignore_patterns: pathspec.pathspec.PathSpec
"""
if base_relpath and os.path.isabs(base_relpath):
raise BuildFile.BadPathError('base_relpath parameter ({}) should be a relative path.'
.format(base_relpath))
if base_relpath and not project_tree.isdir(base_relpath):
raise BuildFile.BadPathError('Can only scan directories and {0} is not a valid dir.'
.format(base_relpath))
if build_ignore_patterns and not isinstance(build_ignore_patterns, PathSpec):
raise TypeError("build_ignore_patterns should be pathspec.pathspec.PathSpec instance, "
"instead {} was given.".format(type(build_ignore_patterns)))
build_files = set()
for root, dirs, files in project_tree.walk(base_relpath or '', topdown=True):
excluded_dirs = list(build_ignore_patterns.match_files('{}/'.format(os.path.join(root, dirname))
for dirname in dirs))
for subdir in excluded_dirs:
# Remove trailing '/' from paths which were added to indicate that paths are paths to directories.
dirs.remove(fast_relpath(subdir, root)[:-1])
for filename in files:
if BuildFile._is_buildfile_name(filename):
build_files.add(os.path.join(root, filename))
return BuildFile._build_files_from_paths(project_tree, build_files, build_ignore_patterns) | python | def scan_build_files(project_tree, base_relpath, build_ignore_patterns=None):
"""Looks for all BUILD files
:param project_tree: Project tree to scan in.
:type project_tree: :class:`pants.base.project_tree.ProjectTree`
:param base_relpath: Directory under root_dir to scan.
:param build_ignore_patterns: .gitignore like patterns to exclude from BUILD files scan.
:type build_ignore_patterns: pathspec.pathspec.PathSpec
"""
if base_relpath and os.path.isabs(base_relpath):
raise BuildFile.BadPathError('base_relpath parameter ({}) should be a relative path.'
.format(base_relpath))
if base_relpath and not project_tree.isdir(base_relpath):
raise BuildFile.BadPathError('Can only scan directories and {0} is not a valid dir.'
.format(base_relpath))
if build_ignore_patterns and not isinstance(build_ignore_patterns, PathSpec):
raise TypeError("build_ignore_patterns should be pathspec.pathspec.PathSpec instance, "
"instead {} was given.".format(type(build_ignore_patterns)))
build_files = set()
for root, dirs, files in project_tree.walk(base_relpath or '', topdown=True):
excluded_dirs = list(build_ignore_patterns.match_files('{}/'.format(os.path.join(root, dirname))
for dirname in dirs))
for subdir in excluded_dirs:
# Remove trailing '/' from paths which were added to indicate that paths are paths to directories.
dirs.remove(fast_relpath(subdir, root)[:-1])
for filename in files:
if BuildFile._is_buildfile_name(filename):
build_files.add(os.path.join(root, filename))
return BuildFile._build_files_from_paths(project_tree, build_files, build_ignore_patterns) | ['def', 'scan_build_files', '(', 'project_tree', ',', 'base_relpath', ',', 'build_ignore_patterns', '=', 'None', ')', ':', 'if', 'base_relpath', 'and', 'os', '.', 'path', '.', 'isabs', '(', 'base_relpath', ')', ':', 'raise', 'BuildFile', '.', 'BadPathError', '(', "'base_relpath parameter ({}) should be a relative path.'", '.', 'format', '(', 'base_relpath', ')', ')', 'if', 'base_relpath', 'and', 'not', 'project_tree', '.', 'isdir', '(', 'base_relpath', ')', ':', 'raise', 'BuildFile', '.', 'BadPathError', '(', "'Can only scan directories and {0} is not a valid dir.'", '.', 'format', '(', 'base_relpath', ')', ')', 'if', 'build_ignore_patterns', 'and', 'not', 'isinstance', '(', 'build_ignore_patterns', ',', 'PathSpec', ')', ':', 'raise', 'TypeError', '(', '"build_ignore_patterns should be pathspec.pathspec.PathSpec instance, "', '"instead {} was given."', '.', 'format', '(', 'type', '(', 'build_ignore_patterns', ')', ')', ')', 'build_files', '=', 'set', '(', ')', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'project_tree', '.', 'walk', '(', 'base_relpath', 'or', "''", ',', 'topdown', '=', 'True', ')', ':', 'excluded_dirs', '=', 'list', '(', 'build_ignore_patterns', '.', 'match_files', '(', "'{}/'", '.', 'format', '(', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'dirname', ')', ')', 'for', 'dirname', 'in', 'dirs', ')', ')', 'for', 'subdir', 'in', 'excluded_dirs', ':', "# Remove trailing '/' from paths which were added to indicate that paths are paths to directories.", 'dirs', '.', 'remove', '(', 'fast_relpath', '(', 'subdir', ',', 'root', ')', '[', ':', '-', '1', ']', ')', 'for', 'filename', 'in', 'files', ':', 'if', 'BuildFile', '.', '_is_buildfile_name', '(', 'filename', ')', ':', 'build_files', '.', 'add', '(', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'filename', ')', ')', 'return', 'BuildFile', '.', '_build_files_from_paths', '(', 'project_tree', ',', 'build_files', ',', 'build_ignore_patterns', ')'] | Looks for all BUILD files
:param project_tree: Project tree to scan in.
:type project_tree: :class:`pants.base.project_tree.ProjectTree`
:param base_relpath: Directory under root_dir to scan.
:param build_ignore_patterns: .gitignore like patterns to exclude from BUILD files scan.
:type build_ignore_patterns: pathspec.pathspec.PathSpec | ['Looks', 'for', 'all', 'BUILD', 'files', ':', 'param', 'project_tree', ':', 'Project', 'tree', 'to', 'scan', 'in', '.', ':', 'type', 'project_tree', ':', ':', 'class', ':', 'pants', '.', 'base', '.', 'project_tree', '.', 'ProjectTree', ':', 'param', 'base_relpath', ':', 'Directory', 'under', 'root_dir', 'to', 'scan', '.', ':', 'param', 'build_ignore_patterns', ':', '.', 'gitignore', 'like', 'patterns', 'to', 'exclude', 'from', 'BUILD', 'files', 'scan', '.', ':', 'type', 'build_ignore_patterns', ':', 'pathspec', '.', 'pathspec', '.', 'PathSpec'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/build_file.py#L55-L84 |
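A hedged sketch of calling the entry above; `project_tree` stands for a ProjectTree implementation (its construction is outside this entry) and the ignore spec is a made-up example built with the pathspec package the code expects:
>>> ignore = pathspec.PathSpec.from_lines('gitwildmatch', ['dist/'])
>>> build_files = BuildFile.scan_build_files(project_tree, 'src/python', build_ignore_patterns=ignore)
>>> BuildFile.scan_build_files(project_tree, '/abs/path')  # raises BuildFile.BadPathError, since absolute paths are rejected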
8,618 | NiklasRosenstein-Python/nr-deprecated | nr/concurrency.py | Job.get | def get(self, default=None):
"""
Get the result of the Job, or return *default* if the job is not finished
or errored. This function will never explicitly raise an exception. Note
that the *default* value is also returned if the job was cancelled.
# Arguments
default (any): The value to return when the result can not be obtained.
"""
if not self.__cancelled and self.__state == Job.SUCCESS:
return self.__result
else:
return default | python | def get(self, default=None):
"""
Get the result of the Job, or return *default* if the job is not finished
or errored. This function will never explicitly raise an exception. Note
that the *default* value is also returned if the job was cancelled.
# Arguments
default (any): The value to return when the result can not be obtained.
"""
if not self.__cancelled and self.__state == Job.SUCCESS:
return self.__result
else:
return default | ['def', 'get', '(', 'self', ',', 'default', '=', 'None', ')', ':', 'if', 'not', 'self', '.', '__cancelled', 'and', 'self', '.', '__state', '==', 'Job', '.', 'SUCCESS', ':', 'return', 'self', '.', '__result', 'else', ':', 'return', 'default'] | Get the result of the Job, or return *default* if the job is not finished
or errored. This function will never explicitly raise an exception. Note
that the *default* value is also returned if the job was cancelled.
# Arguments
default (any): The value to return when the result can not be obtained. | ['Get', 'the', 'result', 'of', 'the', 'Job', 'or', 'return', '*', 'default', '*', 'if', 'the', 'job', 'is', 'not', 'finished', 'or', 'errored', '.', 'This', 'function', 'will', 'never', 'explicitly', 'raise', 'an', 'exception', '.', 'Note', 'that', 'the', '*', 'default', '*', 'value', 'is', 'also', 'returned', 'if', 'the', 'job', 'was', 'cancelled', '.'] | train | https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/concurrency.py#L443-L456 |
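A minimal sketch of the documented behaviour, assuming `job` is a hypothetical Job instance:
>>> job.get()           # the result if the job finished successfully, otherwise None
>>> job.get(default=0)  # 0 is returned for unfinished, errored or cancelled jobs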
8,619 | delfick/aws_syncr | aws_syncr/actions.py | deploy_gateway | def deploy_gateway(collector):
"""Deploy the apigateway to a particular stage"""
configuration = collector.configuration
aws_syncr = configuration['aws_syncr']
aws_syncr, amazon, stage, gateway = find_gateway(aws_syncr, configuration)
gateway.deploy(aws_syncr, amazon, stage)
if not configuration['amazon'].changes:
log.info("No changes were made!!") | python | def deploy_gateway(collector):
"""Deploy the apigateway to a particular stage"""
configuration = collector.configuration
aws_syncr = configuration['aws_syncr']
aws_syncr, amazon, stage, gateway = find_gateway(aws_syncr, configuration)
gateway.deploy(aws_syncr, amazon, stage)
if not configuration['amazon'].changes:
log.info("No changes were made!!") | ['def', 'deploy_gateway', '(', 'collector', ')', ':', 'configuration', '=', 'collector', '.', 'configuration', 'aws_syncr', '=', 'configuration', '[', "'aws_syncr'", ']', 'aws_syncr', ',', 'amazon', ',', 'stage', ',', 'gateway', '=', 'find_gateway', '(', 'aws_syncr', ',', 'configuration', ')', 'gateway', '.', 'deploy', '(', 'aws_syncr', ',', 'amazon', ',', 'stage', ')', 'if', 'not', 'configuration', '[', "'amazon'", ']', '.', 'changes', ':', 'log', '.', 'info', '(', '"No changes were made!!"', ')'] | Deploy the apigateway to a particular stage | ['Deploy', 'the', 'apigateway', 'to', 'a', 'particular', 'stage'] | train | https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/actions.py#L188-L196 |
8,620 | DataBiosphere/toil | src/toil/provisioners/azure/azureProvisioner.py | AzureProvisioner._checkIfClusterExists | def _checkIfClusterExists(self):
"""
Try deleting the resource group. This will fail if it exists and raise an exception.
"""
ansibleArgs = {
'resgrp': self.clusterName,
'region': self._zone
}
try:
self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True)
except RuntimeError:
logger.info("The cluster could not be created. Try deleting the cluster if it already exits.")
raise | python | def _checkIfClusterExists(self):
"""
Try deleting the resource group. This will fail if it exists and raise an exception.
"""
ansibleArgs = {
'resgrp': self.clusterName,
'region': self._zone
}
try:
self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True)
except RuntimeError:
logger.info("The cluster could not be created. Try deleting the cluster if it already exits.")
raise | ['def', '_checkIfClusterExists', '(', 'self', ')', ':', 'ansibleArgs', '=', '{', "'resgrp'", ':', 'self', '.', 'clusterName', ',', "'region'", ':', 'self', '.', '_zone', '}', 'try', ':', 'self', '.', 'callPlaybook', '(', 'self', '.', 'playbook', '[', "'check-cluster'", ']', ',', 'ansibleArgs', ',', 'wait', '=', 'True', ')', 'except', 'RuntimeError', ':', 'logger', '.', 'info', '(', '"The cluster could not be created. Try deleting the cluster if it already exits."', ')', 'raise'] | Try deleting the resource group. This will fail if it exists and raise an exception. | ['Try', 'deleting', 'the', 'resource', 'group', '.', 'This', 'will', 'fail', 'if', 'it', 'exists', 'and', 'raise', 'an', 'exception', '.'] | train | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/azure/azureProvisioner.py#L206-L218 |
8,621 | jeremyschulman/halutz | halutz/class_factory.py | SchemaObjectFactory.schema_class | def schema_class(self, object_schema, model_name, classes=False):
"""
Create a object-class based on the object_schema. Use
this class to create specific instances, and validate the
data values. See the "python-jsonschema-objects" package
for details on further usage.
Parameters
----------
object_schema : dict
The JSON-schema that defines the object
model_name : str
if provided, the name given to the new class. if not
provided, then the name will be determined by
one of the following schema values, in this order:
['x-model', 'title', 'id']
classes : bool
When `True`, this method will return the complete
dictionary of all resolved object-classes built
from the object_schema. This can be helpful
when a deeply nested object_schema is provided; but
generally not necessary. You can then create
a :class:`Namespace` instance using this dict. See
the 'python-jschonschema-objects.utls' package
for further details.
When `False` (default), return only the object-class
Returns
-------
- new class for given object_schema (default)
- dict of all classes when :param:`classes` is True
"""
# if not model_name:
# model_name = SchemaObjectFactory.schema_model_name(object_schema)
cls_bldr = ClassBuilder(self.resolver)
model_cls = cls_bldr.construct(model_name, object_schema)
# if `classes` is False(0) return the new model class,
# else return all the classes resolved
model_cls.proptype = SchemaObjectFactory.proptype
return [model_cls, cls_bldr.resolved][classes] | python | def schema_class(self, object_schema, model_name, classes=False):
"""
Create a object-class based on the object_schema. Use
this class to create specific instances, and validate the
data values. See the "python-jsonschema-objects" package
for details on further usage.
Parameters
----------
object_schema : dict
The JSON-schema that defines the object
model_name : str
if provided, the name given to the new class. if not
provided, then the name will be determined by
one of the following schema values, in this order:
['x-model', 'title', 'id']
classes : bool
When `True`, this method will return the complete
dictionary of all resolved object-classes built
from the object_schema. This can be helpful
when a deeply nested object_schema is provided; but
generally not necessary. You can then create
a :class:`Namespace` instance using this dict. See
the 'python-jschonschema-objects.utls' package
for further details.
When `False` (default), return only the object-class
Returns
-------
- new class for given object_schema (default)
- dict of all classes when :param:`classes` is True
"""
# if not model_name:
# model_name = SchemaObjectFactory.schema_model_name(object_schema)
cls_bldr = ClassBuilder(self.resolver)
model_cls = cls_bldr.construct(model_name, object_schema)
# if `classes` is False(0) return the new model class,
# else return all the classes resolved
model_cls.proptype = SchemaObjectFactory.proptype
return [model_cls, cls_bldr.resolved][classes] | ['def', 'schema_class', '(', 'self', ',', 'object_schema', ',', 'model_name', ',', 'classes', '=', 'False', ')', ':', '# if not model_name:', '# model_name = SchemaObjectFactory.schema_model_name(object_schema)', 'cls_bldr', '=', 'ClassBuilder', '(', 'self', '.', 'resolver', ')', 'model_cls', '=', 'cls_bldr', '.', 'construct', '(', 'model_name', ',', 'object_schema', ')', '# if `classes` is False(0) return the new model class,', '# else return all the classes resolved', 'model_cls', '.', 'proptype', '=', 'SchemaObjectFactory', '.', 'proptype', 'return', '[', 'model_cls', ',', 'cls_bldr', '.', 'resolved', ']', '[', 'classes', ']'] | Create a object-class based on the object_schema. Use
this class to create specific instances, and validate the
data values. See the "python-jsonschema-objects" package
for details on further usage.
Parameters
----------
object_schema : dict
The JSON-schema that defines the object
model_name : str
if provided, the name given to the new class. if not
provided, then the name will be determined by
one of the following schema values, in this order:
['x-model', 'title', 'id']
classes : bool
When `True`, this method will return the complete
dictionary of all resolved object-classes built
from the object_schema. This can be helpful
when a deeply nested object_schema is provided; but
generally not necessary. You can then create
a :class:`Namespace` instance using this dict. See
the 'python-jschonschema-objects.utls' package
for further details.
When `False` (default), return only the object-class
Returns
-------
- new class for given object_schema (default)
- dict of all classes when :param:`classes` is True | ['Create', 'a', 'object', '-', 'class', 'based', 'on', 'the', 'object_schema', '.', 'Use', 'this', 'class', 'to', 'create', 'specific', 'instances', 'and', 'validate', 'the', 'data', 'values', '.', 'See', 'the', 'python', '-', 'jsonschema', '-', 'objects', 'package', 'for', 'details', 'on', 'further', 'usage', '.'] | train | https://github.com/jeremyschulman/halutz/blob/6bb398dc99bf723daabd9eda02494a11252ee109/halutz/class_factory.py#L45-L90 |
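A hedged usage sketch for the entry above; `factory` is a hypothetical SchemaObjectFactory instance and the schema is a made-up example:
>>> person_schema = {'type': 'object', 'properties': {'name': {'type': 'string'}}}
>>> Person = factory.schema_class(person_schema, 'Person')                      # returns the generated object-class
>>> resolved = factory.schema_class(person_schema, 'Person', classes=True)      # dict of every resolved class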
8,622 | contentful/contentful-management.py | contentful_management/resource.py | Resource.base_url | def base_url(klass, space_id='', resource_id=None, environment_id=None, **kwargs):
"""
Returns the URI for the resource.
"""
url = "spaces/{0}".format(
space_id)
if environment_id is not None:
url = url = "{0}/environments/{1}".format(url, environment_id)
url = "{0}/{1}".format(
url,
base_path_for(klass.__name__)
)
if resource_id:
url = "{0}/{1}".format(url, resource_id)
return url | python | def base_url(klass, space_id='', resource_id=None, environment_id=None, **kwargs):
"""
Returns the URI for the resource.
"""
url = "spaces/{0}".format(
space_id)
if environment_id is not None:
url = url = "{0}/environments/{1}".format(url, environment_id)
url = "{0}/{1}".format(
url,
base_path_for(klass.__name__)
)
if resource_id:
url = "{0}/{1}".format(url, resource_id)
return url | ['def', 'base_url', '(', 'klass', ',', 'space_id', '=', "''", ',', 'resource_id', '=', 'None', ',', 'environment_id', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'url', '=', '"spaces/{0}"', '.', 'format', '(', 'space_id', ')', 'if', 'environment_id', 'is', 'not', 'None', ':', 'url', '=', 'url', '=', '"{0}/environments/{1}"', '.', 'format', '(', 'url', ',', 'environment_id', ')', 'url', '=', '"{0}/{1}"', '.', 'format', '(', 'url', ',', 'base_path_for', '(', 'klass', '.', '__name__', ')', ')', 'if', 'resource_id', ':', 'url', '=', '"{0}/{1}"', '.', 'format', '(', 'url', ',', 'resource_id', ')', 'return', 'url'] | Returns the URI for the resource. | ['Returns', 'the', 'URI', 'for', 'the', 'resource', '.'] | train | https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/resource.py#L37-L56 |
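A hedged sketch of the URI shape produced by the entry above, assuming the method is exposed on the class as it appears to be; the space, environment and resource ids are made up:
>>> Resource.base_url('cfexampleapi', resource_id='nyancat', environment_id='master')
The result has the form 'spaces/cfexampleapi/environments/master/<base path for the class>/nyancat', where the base path segment comes from base_path_for(klass.__name__).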
8,623 | softlayer/softlayer-python | SoftLayer/managers/vs.py | VSManager.edit | def edit(self, instance_id, userdata=None, hostname=None, domain=None,
notes=None, tags=None):
"""Edit hostname, domain name, notes, and/or the user data of a VS.
Parameters set to None will be ignored and not attempted to be updated.
:param integer instance_id: the instance ID to edit
:param string userdata: user data on VS to edit.
If none exist it will be created
:param string hostname: valid hostname
:param string domain: valid domain namem
:param string notes: notes about this particular VS
:param string tags: tags to set on the VS as a comma separated list.
Use the empty string to remove all tags.
:returns: bool -- True or an Exception
Example::
# Change the hostname on instance 12345 to 'something'
result = mgr.edit(instance_id=12345 , hostname="something")
#result will be True or an Exception
"""
obj = {}
if userdata:
self.guest.setUserMetadata([userdata], id=instance_id)
if tags is not None:
self.set_tags(tags, guest_id=instance_id)
if hostname:
obj['hostname'] = hostname
if domain:
obj['domain'] = domain
if notes:
obj['notes'] = notes
if not obj:
return True
return self.guest.editObject(obj, id=instance_id) | python | def edit(self, instance_id, userdata=None, hostname=None, domain=None,
notes=None, tags=None):
"""Edit hostname, domain name, notes, and/or the user data of a VS.
Parameters set to None will be ignored and not attempted to be updated.
:param integer instance_id: the instance ID to edit
:param string userdata: user data on VS to edit.
If none exist it will be created
:param string hostname: valid hostname
:param string domain: valid domain namem
:param string notes: notes about this particular VS
:param string tags: tags to set on the VS as a comma separated list.
Use the empty string to remove all tags.
:returns: bool -- True or an Exception
Example::
# Change the hostname on instance 12345 to 'something'
result = mgr.edit(instance_id=12345 , hostname="something")
#result will be True or an Exception
"""
obj = {}
if userdata:
self.guest.setUserMetadata([userdata], id=instance_id)
if tags is not None:
self.set_tags(tags, guest_id=instance_id)
if hostname:
obj['hostname'] = hostname
if domain:
obj['domain'] = domain
if notes:
obj['notes'] = notes
if not obj:
return True
return self.guest.editObject(obj, id=instance_id) | ['def', 'edit', '(', 'self', ',', 'instance_id', ',', 'userdata', '=', 'None', ',', 'hostname', '=', 'None', ',', 'domain', '=', 'None', ',', 'notes', '=', 'None', ',', 'tags', '=', 'None', ')', ':', 'obj', '=', '{', '}', 'if', 'userdata', ':', 'self', '.', 'guest', '.', 'setUserMetadata', '(', '[', 'userdata', ']', ',', 'id', '=', 'instance_id', ')', 'if', 'tags', 'is', 'not', 'None', ':', 'self', '.', 'set_tags', '(', 'tags', ',', 'guest_id', '=', 'instance_id', ')', 'if', 'hostname', ':', 'obj', '[', "'hostname'", ']', '=', 'hostname', 'if', 'domain', ':', 'obj', '[', "'domain'", ']', '=', 'domain', 'if', 'notes', ':', 'obj', '[', "'notes'", ']', '=', 'notes', 'if', 'not', 'obj', ':', 'return', 'True', 'return', 'self', '.', 'guest', '.', 'editObject', '(', 'obj', ',', 'id', '=', 'instance_id', ')'] | Edit hostname, domain name, notes, and/or the user data of a VS.
Parameters set to None will be ignored and not attempted to be updated.
:param integer instance_id: the instance ID to edit
:param string userdata: user data on VS to edit.
If none exist it will be created
:param string hostname: valid hostname
:param string domain: valid domain name
:param string notes: notes about this particular VS
:param string tags: tags to set on the VS as a comma separated list.
Use the empty string to remove all tags.
:returns: bool -- True or an Exception
Example::
# Change the hostname on instance 12345 to 'something'
result = mgr.edit(instance_id=12345 , hostname="something")
#result will be True or an Exception | ['Edit', 'hostname', 'domain', 'name', 'notes', 'and', '/', 'or', 'the', 'user', 'data', 'of', 'a', 'VS', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/vs.py#L694-L735 |
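Two more call patterns for the entry above, in the same spirit as the docstring example (`mgr` is an instantiated VSManager, as in the docs):
>>> mgr.edit(instance_id=12345, notes='maintenance window', tags='web,prod')
>>> mgr.edit(instance_id=12345, tags='')   # the empty string removes all tags
Each call returns True (or raises), per the docstring.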
8,624 | materialsproject/pymatgen | pymatgen/apps/borg/queen.py | order_assimilation | def order_assimilation(args):
"""
Internal helper method for BorgQueen to process assimilation
"""
(path, drone, data, status) = args
newdata = drone.assimilate(path)
if newdata:
data.append(json.dumps(newdata, cls=MontyEncoder))
status['count'] += 1
count = status['count']
total = status['total']
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100)) | python | def order_assimilation(args):
"""
Internal helper method for BorgQueen to process assimilation
"""
(path, drone, data, status) = args
newdata = drone.assimilate(path)
if newdata:
data.append(json.dumps(newdata, cls=MontyEncoder))
status['count'] += 1
count = status['count']
total = status['total']
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100)) | ['def', 'order_assimilation', '(', 'args', ')', ':', '(', 'path', ',', 'drone', ',', 'data', ',', 'status', ')', '=', 'args', 'newdata', '=', 'drone', '.', 'assimilate', '(', 'path', ')', 'if', 'newdata', ':', 'data', '.', 'append', '(', 'json', '.', 'dumps', '(', 'newdata', ',', 'cls', '=', 'MontyEncoder', ')', ')', 'status', '[', "'count'", ']', '+=', '1', 'count', '=', 'status', '[', "'count'", ']', 'total', '=', 'status', '[', "'total'", ']', 'logger', '.', 'info', '(', "'{}/{} ({:.2f}%) done'", '.', 'format', '(', 'count', ',', 'total', ',', 'count', '/', 'total', '*', '100', ')', ')'] | Internal helper method for BorgQueen to process assimilation | ['Internal', 'helper', 'method', 'for', 'BorgQueen', 'to', 'process', 'assimilation'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/apps/borg/queen.py#L131-L143 |
8,625 | osilkin98/PyBRY | generator.py | get_lbry_api_function_docs | def get_lbry_api_function_docs(url=LBRY_API_RAW_JSON_URL):
""" Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:return: List of functions retrieved from the `url` given
:rtype: list
"""
try:
# Grab the page content
docs_page = urlopen(url)
# Read the contents of the actual url we grabbed and decode them into UTF-8
contents = docs_page.read().decode("utf-8")
# Return the contents loaded as JSON
return loads(contents)
# If we get an exception, simply exit
except URLError as UE:
print(UE)
except Exception as E:
print(E)
return [] | python | def get_lbry_api_function_docs(url=LBRY_API_RAW_JSON_URL):
""" Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:return: List of functions retrieved from the `url` given
:rtype: list
"""
try:
# Grab the page content
docs_page = urlopen(url)
# Read the contents of the actual url we grabbed and decode them into UTF-8
contents = docs_page.read().decode("utf-8")
# Return the contents loaded as JSON
return loads(contents)
# If we get an exception, simply exit
except URLError as UE:
print(UE)
except Exception as E:
print(E)
return [] | ['def', 'get_lbry_api_function_docs', '(', 'url', '=', 'LBRY_API_RAW_JSON_URL', ')', ':', 'try', ':', '# Grab the page content', 'docs_page', '=', 'urlopen', '(', 'url', ')', '# Read the contents of the actual url we grabbed and decode them into UTF-8', 'contents', '=', 'docs_page', '.', 'read', '(', ')', '.', 'decode', '(', '"utf-8"', ')', '# Return the contents loaded as JSON', 'return', 'loads', '(', 'contents', ')', '# If we get an exception, simply exit', 'except', 'URLError', 'as', 'UE', ':', 'print', '(', 'UE', ')', 'except', 'Exception', 'as', 'E', ':', 'print', '(', 'E', ')', 'return', '[', ']'] | Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:return: List of functions retrieved from the `url` given
:rtype: list | ['Scrapes', 'the', 'given', 'URL', 'to', 'a', 'page', 'in', 'JSON', 'format', 'to', 'obtain', 'the', 'documentation', 'for', 'the', 'LBRY', 'API'] | train | https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/generator.py#L10-L36 |
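A minimal sketch of calling the entry above with its default URL:
>>> functions = get_lbry_api_function_docs()   # parsed JSON from the docs page, or [] if the fetch failed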
8,626 | JoelBender/bacpypes | py25/bacpypes/bsllservice.py | ProxyServiceNetworkAdapter.process_npdu | def process_npdu(self, npdu):
"""encode NPDUs from the network service access point and send them to the proxy."""
if _debug: ProxyServiceNetworkAdapter._debug("process_npdu %r", npdu)
# encode the npdu as if it was about to be delivered to the network
pdu = PDU()
npdu.encode(pdu)
if _debug: ProxyServiceNetworkAdapter._debug(" - pdu: %r", pdu)
# broadcast messages go to peers
if pdu.pduDestination.addrType == Address.localBroadcastAddr:
xpdu = ServerToProxyBroadcastNPDU(pdu)
else:
xpdu = ServerToProxyUnicastNPDU(pdu.pduDestination, pdu)
# the connection has the correct address
xpdu.pduDestination = self.conn.address
# send it down to the multiplexer
self.conn.service.service_request(xpdu) | python | def process_npdu(self, npdu):
"""encode NPDUs from the network service access point and send them to the proxy."""
if _debug: ProxyServiceNetworkAdapter._debug("process_npdu %r", npdu)
# encode the npdu as if it was about to be delivered to the network
pdu = PDU()
npdu.encode(pdu)
if _debug: ProxyServiceNetworkAdapter._debug(" - pdu: %r", pdu)
# broadcast messages go to peers
if pdu.pduDestination.addrType == Address.localBroadcastAddr:
xpdu = ServerToProxyBroadcastNPDU(pdu)
else:
xpdu = ServerToProxyUnicastNPDU(pdu.pduDestination, pdu)
# the connection has the correct address
xpdu.pduDestination = self.conn.address
# send it down to the multiplexer
self.conn.service.service_request(xpdu) | ['def', 'process_npdu', '(', 'self', ',', 'npdu', ')', ':', 'if', '_debug', ':', 'ProxyServiceNetworkAdapter', '.', '_debug', '(', '"process_npdu %r"', ',', 'npdu', ')', '# encode the npdu as if it was about to be delivered to the network', 'pdu', '=', 'PDU', '(', ')', 'npdu', '.', 'encode', '(', 'pdu', ')', 'if', '_debug', ':', 'ProxyServiceNetworkAdapter', '.', '_debug', '(', '" - pdu: %r"', ',', 'pdu', ')', '# broadcast messages go to peers', 'if', 'pdu', '.', 'pduDestination', '.', 'addrType', '==', 'Address', '.', 'localBroadcastAddr', ':', 'xpdu', '=', 'ServerToProxyBroadcastNPDU', '(', 'pdu', ')', 'else', ':', 'xpdu', '=', 'ServerToProxyUnicastNPDU', '(', 'pdu', '.', 'pduDestination', ',', 'pdu', ')', '# the connection has the correct address', 'xpdu', '.', 'pduDestination', '=', 'self', '.', 'conn', '.', 'address', '# send it down to the multiplexer', 'self', '.', 'conn', '.', 'service', '.', 'service_request', '(', 'xpdu', ')'] | encode NPDUs from the network service access point and send them to the proxy. | ['encode', 'NPDUs', 'from', 'the', 'network', 'service', 'access', 'point', 'and', 'send', 'them', 'to', 'the', 'proxy', '.'] | train | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/bsllservice.py#L1129-L1148 |
8,627 | tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | gather_blocks_2d | def gather_blocks_2d(x, indices):
"""Gathers flattened blocks from x."""
x_shape = common_layers.shape_list(x)
x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
# [length, batch, heads, dim]
x_t = tf.transpose(x, [2, 0, 1, 3])
x_new = tf.gather(x_t, indices)
# returns [batch, heads, num_blocks, block_length ** 2, dim]
return tf.transpose(x_new, [2, 3, 0, 1, 4]) | python | def gather_blocks_2d(x, indices):
"""Gathers flattened blocks from x."""
x_shape = common_layers.shape_list(x)
x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
# [length, batch, heads, dim]
x_t = tf.transpose(x, [2, 0, 1, 3])
x_new = tf.gather(x_t, indices)
# returns [batch, heads, num_blocks, block_length ** 2, dim]
return tf.transpose(x_new, [2, 3, 0, 1, 4]) | ['def', 'gather_blocks_2d', '(', 'x', ',', 'indices', ')', ':', 'x_shape', '=', 'common_layers', '.', 'shape_list', '(', 'x', ')', 'x', '=', 'reshape_range', '(', 'x', ',', '2', ',', '4', ',', '[', 'tf', '.', 'reduce_prod', '(', 'x_shape', '[', '2', ':', '4', ']', ')', ']', ')', '# [length, batch, heads, dim]', 'x_t', '=', 'tf', '.', 'transpose', '(', 'x', ',', '[', '2', ',', '0', ',', '1', ',', '3', ']', ')', 'x_new', '=', 'tf', '.', 'gather', '(', 'x_t', ',', 'indices', ')', '# returns [batch, heads, num_blocks, block_length ** 2, dim]', 'return', 'tf', '.', 'transpose', '(', 'x_new', ',', '[', '2', ',', '3', ',', '0', ',', '1', ',', '4', ']', ')'] | Gathers flattened blocks from x. | ['Gathers', 'flattened', 'blocks', 'from', 'x', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3561-L3569 |
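A shape walk-through for the entry above (sizes are illustrative): with x of shape [batch, heads, h, w, dim] and indices of shape [num_blocks, block_length**2], the reshape collapses dims 2..3 to give [batch, heads, h*w, dim], the first transpose gives [h*w, batch, heads, dim], tf.gather over the leading axis gives [num_blocks, block_length**2, batch, heads, dim], and the final transpose yields [batch, heads, num_blocks, block_length**2, dim], matching the comment in the code.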
8,628 | biosustain/optlang | optlang/interface.py | Model.remove | def remove(self, stuff):
"""Remove variables and constraints.
Parameters
----------
stuff : iterable, str, Variable, Constraint
Either an iterable containing variables and constraints to be removed from the model or a single variable or constraint (or their names).
Returns
-------
None
"""
if self._pending_modifications.toggle == 'add':
self.update()
self._pending_modifications.toggle = 'remove'
if isinstance(stuff, str):
try:
variable = self.variables[stuff]
self._pending_modifications.rm_var.append(variable)
except KeyError:
try:
constraint = self.constraints[stuff]
self._pending_modifications.rm_constr.append(constraint)
except KeyError:
raise LookupError(
"%s is neither a variable nor a constraint in the current solver instance." % stuff)
elif isinstance(stuff, Variable):
self._pending_modifications.rm_var.append(stuff)
elif isinstance(stuff, Constraint):
self._pending_modifications.rm_constr.append(stuff)
elif isinstance(stuff, collections.Iterable):
for elem in stuff:
self.remove(elem)
elif isinstance(stuff, Objective):
raise TypeError(
"Cannot remove objective %s. Use model.objective = Objective(...) to change the current objective." % stuff)
else:
raise TypeError(
"Cannot remove %s. It neither a variable or constraint." % stuff) | python | def remove(self, stuff):
"""Remove variables and constraints.
Parameters
----------
stuff : iterable, str, Variable, Constraint
Either an iterable containing variables and constraints to be removed from the model or a single variable or constraint (or their names).
Returns
-------
None
"""
if self._pending_modifications.toggle == 'add':
self.update()
self._pending_modifications.toggle = 'remove'
if isinstance(stuff, str):
try:
variable = self.variables[stuff]
self._pending_modifications.rm_var.append(variable)
except KeyError:
try:
constraint = self.constraints[stuff]
self._pending_modifications.rm_constr.append(constraint)
except KeyError:
raise LookupError(
"%s is neither a variable nor a constraint in the current solver instance." % stuff)
elif isinstance(stuff, Variable):
self._pending_modifications.rm_var.append(stuff)
elif isinstance(stuff, Constraint):
self._pending_modifications.rm_constr.append(stuff)
elif isinstance(stuff, collections.Iterable):
for elem in stuff:
self.remove(elem)
elif isinstance(stuff, Objective):
raise TypeError(
"Cannot remove objective %s. Use model.objective = Objective(...) to change the current objective." % stuff)
else:
raise TypeError(
"Cannot remove %s. It neither a variable or constraint." % stuff) | ['def', 'remove', '(', 'self', ',', 'stuff', ')', ':', 'if', 'self', '.', '_pending_modifications', '.', 'toggle', '==', "'add'", ':', 'self', '.', 'update', '(', ')', 'self', '.', '_pending_modifications', '.', 'toggle', '=', "'remove'", 'if', 'isinstance', '(', 'stuff', ',', 'str', ')', ':', 'try', ':', 'variable', '=', 'self', '.', 'variables', '[', 'stuff', ']', 'self', '.', '_pending_modifications', '.', 'rm_var', '.', 'append', '(', 'variable', ')', 'except', 'KeyError', ':', 'try', ':', 'constraint', '=', 'self', '.', 'constraints', '[', 'stuff', ']', 'self', '.', '_pending_modifications', '.', 'rm_constr', '.', 'append', '(', 'constraint', ')', 'except', 'KeyError', ':', 'raise', 'LookupError', '(', '"%s is neither a variable nor a constraint in the current solver instance."', '%', 'stuff', ')', 'elif', 'isinstance', '(', 'stuff', ',', 'Variable', ')', ':', 'self', '.', '_pending_modifications', '.', 'rm_var', '.', 'append', '(', 'stuff', ')', 'elif', 'isinstance', '(', 'stuff', ',', 'Constraint', ')', ':', 'self', '.', '_pending_modifications', '.', 'rm_constr', '.', 'append', '(', 'stuff', ')', 'elif', 'isinstance', '(', 'stuff', ',', 'collections', '.', 'Iterable', ')', ':', 'for', 'elem', 'in', 'stuff', ':', 'self', '.', 'remove', '(', 'elem', ')', 'elif', 'isinstance', '(', 'stuff', ',', 'Objective', ')', ':', 'raise', 'TypeError', '(', '"Cannot remove objective %s. Use model.objective = Objective(...) to change the current objective."', '%', 'stuff', ')', 'else', ':', 'raise', 'TypeError', '(', '"Cannot remove %s. It neither a variable or constraint."', '%', 'stuff', ')'] | Remove variables and constraints.
Parameters
----------
stuff : iterable, str, Variable, Constraint
Either an iterable containing variables and constraints to be removed from the model or a single variable or constraint (or their names).
Returns
-------
None | ['Remove', 'variables', 'and', 'constraints', '.'] | train | https://github.com/biosustain/optlang/blob/13673ac26f6b3ba37a2ef392489722c52e3c5ff1/optlang/interface.py#L1377-L1415 |
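A usage sketch for the entry above, assuming `model` already holds a variable named 'x' and a constraint named 'c1':
>>> model.remove('x')              # queues the variable for removal by name
>>> model.remove(['c1'])           # iterables of names and/or objects are also accepted
>>> model.remove(model.objective)  # raises TypeError, as documented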
8,629 | google/openhtf | openhtf/plugs/usb/filesync_service.py | AbstractFilesyncTransport.read_until_done | def read_until_done(self, command, timeout=None):
"""Yield messages read until we receive a 'DONE' command.
Read messages of the given command until we receive a 'DONE' command. If a
command different than the requested one is received, an AdbProtocolError
is raised.
Args:
command: The command to expect, like 'DENT' or 'DATA'.
timeout: The timeouts.PolledTimeout to use for this operation.
Yields:
Messages read, of type self.RECV_MSG_TYPE, see read_message().
Raises:
AdbProtocolError: If an unexpected command is read.
AdbRemoteError: If a 'FAIL' message is read.
"""
message = self.read_message(timeout)
while message.command != 'DONE':
message.assert_command_is(command)
yield message
message = self.read_message(timeout) | python | def read_until_done(self, command, timeout=None):
"""Yield messages read until we receive a 'DONE' command.
Read messages of the given command until we receive a 'DONE' command. If a
command different than the requested one is received, an AdbProtocolError
is raised.
Args:
command: The command to expect, like 'DENT' or 'DATA'.
timeout: The timeouts.PolledTimeout to use for this operation.
Yields:
Messages read, of type self.RECV_MSG_TYPE, see read_message().
Raises:
AdbProtocolError: If an unexpected command is read.
AdbRemoteError: If a 'FAIL' message is read.
"""
message = self.read_message(timeout)
while message.command != 'DONE':
message.assert_command_is(command)
yield message
message = self.read_message(timeout) | ['def', 'read_until_done', '(', 'self', ',', 'command', ',', 'timeout', '=', 'None', ')', ':', 'message', '=', 'self', '.', 'read_message', '(', 'timeout', ')', 'while', 'message', '.', 'command', '!=', "'DONE'", ':', 'message', '.', 'assert_command_is', '(', 'command', ')', 'yield', 'message', 'message', '=', 'self', '.', 'read_message', '(', 'timeout', ')'] | Yield messages read until we receive a 'DONE' command.
Read messages of the given command until we receive a 'DONE' command. If a
command different than the requested one is received, an AdbProtocolError
is raised.
Args:
command: The command to expect, like 'DENT' or 'DATA'.
timeout: The timeouts.PolledTimeout to use for this operation.
Yields:
Messages read, of type self.RECV_MSG_TYPE, see read_message().
Raises:
AdbProtocolError: If an unexpected command is read.
AdbRemoteError: If a 'FAIL' message is read. | ['Yield', 'messages', 'read', 'until', 'we', 'receive', 'a', 'DONE', 'command', '.'] | train | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/filesync_service.py#L391-L413 |
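A sketch of consuming the generator above; `transport` stands for a hypothetical filesync transport instance:
>>> for msg in transport.read_until_done('DENT'):
...     print(msg)   # each yielded message has command 'DENT'; iteration stops at 'DONE'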
8,630 | deepmind/sonnet | sonnet/python/ops/initializers.py | _Restore._partition_spec | def _partition_spec(self, shape, partition_info):
"""Build magic (and sparsely documented) shapes_and_slices spec string."""
if partition_info is None:
return '' # Empty string indicates a non-partitioned tensor.
ssi = tf.Variable.SaveSliceInfo(
full_name=self._var_name,
full_shape=partition_info.full_shape,
var_offset=partition_info.var_offset,
var_shape=shape)
return ssi.spec | python | def _partition_spec(self, shape, partition_info):
"""Build magic (and sparsely documented) shapes_and_slices spec string."""
if partition_info is None:
return '' # Empty string indicates a non-partitioned tensor.
ssi = tf.Variable.SaveSliceInfo(
full_name=self._var_name,
full_shape=partition_info.full_shape,
var_offset=partition_info.var_offset,
var_shape=shape)
return ssi.spec | ['def', '_partition_spec', '(', 'self', ',', 'shape', ',', 'partition_info', ')', ':', 'if', 'partition_info', 'is', 'None', ':', 'return', "''", '# Empty string indicates a non-partitioned tensor.', 'ssi', '=', 'tf', '.', 'Variable', '.', 'SaveSliceInfo', '(', 'full_name', '=', 'self', '.', '_var_name', ',', 'full_shape', '=', 'partition_info', '.', 'full_shape', ',', 'var_offset', '=', 'partition_info', '.', 'var_offset', ',', 'var_shape', '=', 'shape', ')', 'return', 'ssi', '.', 'spec'] | Build magic (and sparsely documented) shapes_and_slices spec string. | ['Build', 'magic', '(', 'and', 'sparsely', 'documented', ')', 'shapes_and_slices', 'spec', 'string', '.'] | train | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/ops/initializers.py#L58-L67 |
8,631 | gambogi/CSHLDAP | CSHLDAP.py | CSHLDAP.search | def search( self, base=False, trim=False, objects=False, **kwargs ):
""" Returns matching entries for search in ldap
structured as [(dn, {attributes})]
UNLESS searching by dn, in which case the first match
is returned
"""
scope = pyldap.SCOPE_SUBTREE
if not base:
base = self.users
filterstr =''
for key, value in kwargs.iteritems():
filterstr += '({0}={1})'.format(key,value)
if key == 'dn':
filterstr = '(objectClass=*)'
base = value
scope = pyldap.SCOPE_BASE
break
if len(kwargs) > 1:
filterstr = '(&'+filterstr+')'
result = self.ldap.search_s(base, pyldap.SCOPE_SUBTREE, filterstr, ['*','+'])
if base == self.users:
for member in result:
groups = self.getGroups(member[0])
member[1]['groups'] = groups
if 'eboard' in member[1]['groups']:
member[1]['committee'] = self.search(base=self.committees, \
head=member[0])[0][1]['cn'][0]
if objects:
return self.memberObjects(result)
finalResult = self.trimResult(result) if trim else result
return finalResult | python | def search( self, base=False, trim=False, objects=False, **kwargs ):
""" Returns matching entries for search in ldap
structured as [(dn, {attributes})]
UNLESS searching by dn, in which case the first match
is returned
"""
scope = pyldap.SCOPE_SUBTREE
if not base:
base = self.users
filterstr =''
for key, value in kwargs.iteritems():
filterstr += '({0}={1})'.format(key,value)
if key == 'dn':
filterstr = '(objectClass=*)'
base = value
scope = pyldap.SCOPE_BASE
break
if len(kwargs) > 1:
filterstr = '(&'+filterstr+')'
result = self.ldap.search_s(base, pyldap.SCOPE_SUBTREE, filterstr, ['*','+'])
if base == self.users:
for member in result:
groups = self.getGroups(member[0])
member[1]['groups'] = groups
if 'eboard' in member[1]['groups']:
member[1]['committee'] = self.search(base=self.committees, \
head=member[0])[0][1]['cn'][0]
if objects:
return self.memberObjects(result)
finalResult = self.trimResult(result) if trim else result
return finalResult | ['def', 'search', '(', 'self', ',', 'base', '=', 'False', ',', 'trim', '=', 'False', ',', 'objects', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'scope', '=', 'pyldap', '.', 'SCOPE_SUBTREE', 'if', 'not', 'base', ':', 'base', '=', 'self', '.', 'users', 'filterstr', '=', "''", 'for', 'key', ',', 'value', 'in', 'kwargs', '.', 'iteritems', '(', ')', ':', 'filterstr', '+=', "'({0}={1})'", '.', 'format', '(', 'key', ',', 'value', ')', 'if', 'key', '==', "'dn'", ':', 'filterstr', '=', "'(objectClass=*)'", 'base', '=', 'value', 'scope', '=', 'pyldap', '.', 'SCOPE_BASE', 'break', 'if', 'len', '(', 'kwargs', ')', '>', '1', ':', 'filterstr', '=', "'(&'", '+', 'filterstr', '+', "')'", 'result', '=', 'self', '.', 'ldap', '.', 'search_s', '(', 'base', ',', 'pyldap', '.', 'SCOPE_SUBTREE', ',', 'filterstr', ',', '[', "'*'", ',', "'+'", ']', ')', 'if', 'base', '==', 'self', '.', 'users', ':', 'for', 'member', 'in', 'result', ':', 'groups', '=', 'self', '.', 'getGroups', '(', 'member', '[', '0', ']', ')', 'member', '[', '1', ']', '[', "'groups'", ']', '=', 'groups', 'if', "'eboard'", 'in', 'member', '[', '1', ']', '[', "'groups'", ']', ':', 'member', '[', '1', ']', '[', "'committee'", ']', '=', 'self', '.', 'search', '(', 'base', '=', 'self', '.', 'committees', ',', 'head', '=', 'member', '[', '0', ']', ')', '[', '0', ']', '[', '1', ']', '[', "'cn'", ']', '[', '0', ']', 'if', 'objects', ':', 'return', 'self', '.', 'memberObjects', '(', 'result', ')', 'finalResult', '=', 'self', '.', 'trimResult', '(', 'result', ')', 'if', 'trim', 'else', 'result', 'return', 'finalResult'] | Returns matching entries for search in ldap
structured as [(dn, {attributes})]
UNLESS searching by dn, in which case the first match
is returned | ['Returns', 'matching', 'entries', 'for', 'search', 'in', 'ldap', 'structured', 'as', '[', '(', 'dn', '{', 'attributes', '}', ')', ']', 'UNLESS', 'searching', 'by', 'dn', 'in', 'which', 'case', 'the', 'first', 'match', 'is', 'returned'] | train | https://github.com/gambogi/CSHLDAP/blob/09cb754b1e72437834e0d8cb4c7ac1830cfa6829/CSHLDAP.py#L112-L145 |
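A couple of hedged call patterns for the entry above; `csh` stands for a hypothetical connected CSHLDAP instance and the attribute values are made up:
>>> csh.search(uid='someuser')             # [(dn, {attributes})] for matching members
>>> csh.search(trim=True, cn='Some Name')  # same search, with trimmed result entries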
8,632 | Crunch-io/crunch-cube | src/cr/cube/cube_slice.py | CubeSlice.min_base_size_mask | def min_base_size_mask(self, size, hs_dims=None, prune=False):
"""Returns MinBaseSizeMask object with correct row, col and table masks.
The returned object stores the necessary information about the base size, as
well as about the base values. It can create corresponding masks in the row,
column, and table directions, based on the corresponding base values
(the values of the unweighted margins).
Usage:
>>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice
>>> cube_slice.min_base_size_mask(30).row_mask
>>> cube_slice.min_base_size_mask(50).column_mask
>>> cube_slice.min_base_size_mask(22).table_mask
"""
return MinBaseSizeMask(self, size, hs_dims=hs_dims, prune=prune) | python | def min_base_size_mask(self, size, hs_dims=None, prune=False):
"""Returns MinBaseSizeMask object with correct row, col and table masks.
The returned object stores the necessary information about the base size, as
well as about the base values. It can create corresponding masks in teh row,
column, and table directions, based on the corresponding base values
(the values of the unweighted margins).
Usage:
>>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice
>>> cube_slice.min_base_size_mask(30).row_mask
>>> cube_slice.min_base_size_mask(50).column_mask
>>> cube_slice.min_base_size_mask(22).table_mask
"""
return MinBaseSizeMask(self, size, hs_dims=hs_dims, prune=prune) | ['def', 'min_base_size_mask', '(', 'self', ',', 'size', ',', 'hs_dims', '=', 'None', ',', 'prune', '=', 'False', ')', ':', 'return', 'MinBaseSizeMask', '(', 'self', ',', 'size', ',', 'hs_dims', '=', 'hs_dims', ',', 'prune', '=', 'prune', ')'] | Returns MinBaseSizeMask object with correct row, col and table masks.
The returned object stores the necessary information about the base size, as
well as about the base values. It can create corresponding masks in teh row,
column, and table directions, based on the corresponding base values
(the values of the unweighted margins).
Usage:
>>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice
>>> cube_slice.min_base_size_mask(30).row_mask
>>> cube_slice.min_base_size_mask(50).column_mask
>>> cube_slice.min_base_size_mask(22).table_mask | ['Returns', 'MinBaseSizeMask', 'object', 'with', 'correct', 'row', 'col', 'and', 'table', 'masks', '.'] | train | https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/cube_slice.py#L348-L362 |
8,633 | CodyKochmann/time_limit | commit-update.py | sync_readmes | def sync_readmes():
""" just copies README.md into README for pypi documentation """
print("syncing README")
with open("README.md", 'r') as reader:
file_text = reader.read()
with open("README", 'w') as writer:
writer.write(file_text) | python | def sync_readmes():
""" just copies README.md into README for pypi documentation """
print("syncing README")
with open("README.md", 'r') as reader:
file_text = reader.read()
with open("README", 'w') as writer:
writer.write(file_text) | ['def', 'sync_readmes', '(', ')', ':', 'print', '(', '"syncing README"', ')', 'with', 'open', '(', '"README.md"', ',', "'r'", ')', 'as', 'reader', ':', 'file_text', '=', 'reader', '.', 'read', '(', ')', 'with', 'open', '(', '"README"', ',', "'w'", ')', 'as', 'writer', ':', 'writer', '.', 'write', '(', 'file_text', ')'] | just copies README.md into README for pypi documentation | ['just', 'copies', 'README', '.', 'md', 'into', 'README', 'for', 'pypi', 'documentation'] | train | https://github.com/CodyKochmann/time_limit/blob/447a640d3e187bb4775d780b757c6d9bdc88ae34/commit-update.py#L50-L56 |
8,634 | BD2KGenomics/protect | src/protect/mutation_calling/common.py | merge_perchrom_mutations | def merge_perchrom_mutations(job, chrom, mutations, univ_options):
"""
Merge the mutation calls for a single chromosome.
:param str chrom: Chromosome to process
:param dict mutations: dict of dicts of the various mutation caller names as keys, and a dict of
per chromosome job store ids for vcfs as value
:param dict univ_options: Dict of universal options used by almost all tools
:returns fsID for vcf containing merged calls for the given chromosome
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
from protect.mutation_calling.muse import process_muse_vcf
from protect.mutation_calling.mutect import process_mutect_vcf
from protect.mutation_calling.radia import process_radia_vcf
from protect.mutation_calling.somaticsniper import process_somaticsniper_vcf
from protect.mutation_calling.strelka import process_strelka_vcf
mutations.pop('indels')
mutations['strelka_indels'] = mutations['strelka']['indels']
mutations['strelka_snvs'] = mutations['strelka']['snvs']
vcf_processor = {'snvs': {'mutect': process_mutect_vcf,
'muse': process_muse_vcf,
'radia': process_radia_vcf,
'somaticsniper': process_somaticsniper_vcf,
'strelka_snvs': process_strelka_vcf
},
'indels': {'strelka_indels': process_strelka_vcf
}
}
# 'fusions': lambda x: None,
# 'indels': lambda x: None}
# For now, let's just say 2 out of n need to call it.
# num_preds = len(mutations)
# majority = int((num_preds + 0.5) / 2)
majority = {'snvs': 2,
'indels': 1}
accepted_hits = defaultdict(dict)
for mut_type in vcf_processor.keys():
# Get input files
perchrom_mutations = {caller: vcf_processor[mut_type][caller](job, mutations[caller][chrom],
work_dir, univ_options)
for caller in vcf_processor[mut_type]}
# Process the strelka key
perchrom_mutations['strelka'] = perchrom_mutations['strelka_' + mut_type]
perchrom_mutations.pop('strelka_' + mut_type)
# Read in each file to a dict
vcf_lists = {caller: read_vcf(vcf_file) for caller, vcf_file in perchrom_mutations.items()}
all_positions = list(set(itertools.chain(*vcf_lists.values())))
for position in sorted(all_positions):
hits = {caller: position in vcf_lists[caller] for caller in perchrom_mutations.keys()}
if sum(hits.values()) >= majority[mut_type]:
callers = ','.join([caller for caller, hit in hits.items() if hit])
assert position[1] not in accepted_hits[position[0]]
accepted_hits[position[0]][position[1]] = (position[2], position[3], callers)
with open(''.join([work_dir, '/', chrom, '.vcf']), 'w') as outfile:
print('##fileformat=VCFv4.0', file=outfile)
print('##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.',
file=outfile)
print('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO', file=outfile)
for chrom in chrom_sorted(accepted_hits.keys()):
for position in sorted(accepted_hits[chrom]):
print(chrom, position, '.', accepted_hits[chrom][position][0],
accepted_hits[chrom][position][1], '.', 'PASS',
'callers=' + accepted_hits[chrom][position][2], sep='\t', file=outfile)
fsid = job.fileStore.writeGlobalFile(outfile.name)
export_results(job, fsid, outfile.name, univ_options, subfolder='mutations/merged')
return fsid | python | def merge_perchrom_mutations(job, chrom, mutations, univ_options):
"""
Merge the mutation calls for a single chromosome.
:param str chrom: Chromosome to process
:param dict mutations: dict of dicts of the various mutation caller names as keys, and a dict of
per chromosome job store ids for vcfs as value
:param dict univ_options: Dict of universal options used by almost all tools
:returns fsID for vcf containing merged calls for the given chromosome
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
from protect.mutation_calling.muse import process_muse_vcf
from protect.mutation_calling.mutect import process_mutect_vcf
from protect.mutation_calling.radia import process_radia_vcf
from protect.mutation_calling.somaticsniper import process_somaticsniper_vcf
from protect.mutation_calling.strelka import process_strelka_vcf
mutations.pop('indels')
mutations['strelka_indels'] = mutations['strelka']['indels']
mutations['strelka_snvs'] = mutations['strelka']['snvs']
vcf_processor = {'snvs': {'mutect': process_mutect_vcf,
'muse': process_muse_vcf,
'radia': process_radia_vcf,
'somaticsniper': process_somaticsniper_vcf,
'strelka_snvs': process_strelka_vcf
},
'indels': {'strelka_indels': process_strelka_vcf
}
}
# 'fusions': lambda x: None,
# 'indels': lambda x: None}
# For now, let's just say 2 out of n need to call it.
# num_preds = len(mutations)
# majority = int((num_preds + 0.5) / 2)
majority = {'snvs': 2,
'indels': 1}
accepted_hits = defaultdict(dict)
for mut_type in vcf_processor.keys():
# Get input files
perchrom_mutations = {caller: vcf_processor[mut_type][caller](job, mutations[caller][chrom],
work_dir, univ_options)
for caller in vcf_processor[mut_type]}
# Process the strelka key
perchrom_mutations['strelka'] = perchrom_mutations['strelka_' + mut_type]
perchrom_mutations.pop('strelka_' + mut_type)
# Read in each file to a dict
vcf_lists = {caller: read_vcf(vcf_file) for caller, vcf_file in perchrom_mutations.items()}
all_positions = list(set(itertools.chain(*vcf_lists.values())))
for position in sorted(all_positions):
hits = {caller: position in vcf_lists[caller] for caller in perchrom_mutations.keys()}
if sum(hits.values()) >= majority[mut_type]:
callers = ','.join([caller for caller, hit in hits.items() if hit])
assert position[1] not in accepted_hits[position[0]]
accepted_hits[position[0]][position[1]] = (position[2], position[3], callers)
with open(''.join([work_dir, '/', chrom, '.vcf']), 'w') as outfile:
print('##fileformat=VCFv4.0', file=outfile)
print('##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.',
file=outfile)
print('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO', file=outfile)
for chrom in chrom_sorted(accepted_hits.keys()):
for position in sorted(accepted_hits[chrom]):
print(chrom, position, '.', accepted_hits[chrom][position][0],
accepted_hits[chrom][position][1], '.', 'PASS',
'callers=' + accepted_hits[chrom][position][2], sep='\t', file=outfile)
fsid = job.fileStore.writeGlobalFile(outfile.name)
export_results(job, fsid, outfile.name, univ_options, subfolder='mutations/merged')
return fsid | ['def', 'merge_perchrom_mutations', '(', 'job', ',', 'chrom', ',', 'mutations', ',', 'univ_options', ')', ':', 'work_dir', '=', 'os', '.', 'getcwd', '(', ')', 'from', 'protect', '.', 'mutation_calling', '.', 'muse', 'import', 'process_muse_vcf', 'from', 'protect', '.', 'mutation_calling', '.', 'mutect', 'import', 'process_mutect_vcf', 'from', 'protect', '.', 'mutation_calling', '.', 'radia', 'import', 'process_radia_vcf', 'from', 'protect', '.', 'mutation_calling', '.', 'somaticsniper', 'import', 'process_somaticsniper_vcf', 'from', 'protect', '.', 'mutation_calling', '.', 'strelka', 'import', 'process_strelka_vcf', 'mutations', '.', 'pop', '(', "'indels'", ')', 'mutations', '[', "'strelka_indels'", ']', '=', 'mutations', '[', "'strelka'", ']', '[', "'indels'", ']', 'mutations', '[', "'strelka_snvs'", ']', '=', 'mutations', '[', "'strelka'", ']', '[', "'snvs'", ']', 'vcf_processor', '=', '{', "'snvs'", ':', '{', "'mutect'", ':', 'process_mutect_vcf', ',', "'muse'", ':', 'process_muse_vcf', ',', "'radia'", ':', 'process_radia_vcf', ',', "'somaticsniper'", ':', 'process_somaticsniper_vcf', ',', "'strelka_snvs'", ':', 'process_strelka_vcf', '}', ',', "'indels'", ':', '{', "'strelka_indels'", ':', 'process_strelka_vcf', '}', '}', "# 'fusions': lambda x: None,", "# 'indels': lambda x: None}", "# For now, let's just say 2 out of n need to call it.", '# num_preds = len(mutations)', '# majority = int((num_preds + 0.5) / 2)', 'majority', '=', '{', "'snvs'", ':', '2', ',', "'indels'", ':', '1', '}', 'accepted_hits', '=', 'defaultdict', '(', 'dict', ')', 'for', 'mut_type', 'in', 'vcf_processor', '.', 'keys', '(', ')', ':', '# Get input files', 'perchrom_mutations', '=', '{', 'caller', ':', 'vcf_processor', '[', 'mut_type', ']', '[', 'caller', ']', '(', 'job', ',', 'mutations', '[', 'caller', ']', '[', 'chrom', ']', ',', 'work_dir', ',', 'univ_options', ')', 'for', 'caller', 'in', 'vcf_processor', '[', 'mut_type', ']', '}', '# Process the strelka key', 'perchrom_mutations', '[', "'strelka'", ']', '=', 'perchrom_mutations', '[', "'strelka_'", '+', 'mut_type', ']', 'perchrom_mutations', '.', 'pop', '(', "'strelka_'", '+', 'mut_type', ')', '# Read in each file to a dict', 'vcf_lists', '=', '{', 'caller', ':', 'read_vcf', '(', 'vcf_file', ')', 'for', 'caller', ',', 'vcf_file', 'in', 'perchrom_mutations', '.', 'items', '(', ')', '}', 'all_positions', '=', 'list', '(', 'set', '(', 'itertools', '.', 'chain', '(', '*', 'vcf_lists', '.', 'values', '(', ')', ')', ')', ')', 'for', 'position', 'in', 'sorted', '(', 'all_positions', ')', ':', 'hits', '=', '{', 'caller', ':', 'position', 'in', 'vcf_lists', '[', 'caller', ']', 'for', 'caller', 'in', 'perchrom_mutations', '.', 'keys', '(', ')', '}', 'if', 'sum', '(', 'hits', '.', 'values', '(', ')', ')', '>=', 'majority', '[', 'mut_type', ']', ':', 'callers', '=', "','", '.', 'join', '(', '[', 'caller', 'for', 'caller', ',', 'hit', 'in', 'hits', '.', 'items', '(', ')', 'if', 'hit', ']', ')', 'assert', 'position', '[', '1', ']', 'not', 'in', 'accepted_hits', '[', 'position', '[', '0', ']', ']', 'accepted_hits', '[', 'position', '[', '0', ']', ']', '[', 'position', '[', '1', ']', ']', '=', '(', 'position', '[', '2', ']', ',', 'position', '[', '3', ']', ',', 'callers', ')', 'with', 'open', '(', "''", '.', 'join', '(', '[', 'work_dir', ',', "'/'", ',', 'chrom', ',', "'.vcf'", ']', ')', ',', "'w'", ')', 'as', 'outfile', ':', 'print', '(', "'##fileformat=VCFv4.0'", ',', 'file', '=', 'outfile', ')', 'print', '(', 
"'##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.'", ',', 'file', '=', 'outfile', ')', 'print', '(', "'#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO'", ',', 'file', '=', 'outfile', ')', 'for', 'chrom', 'in', 'chrom_sorted', '(', 'accepted_hits', '.', 'keys', '(', ')', ')', ':', 'for', 'position', 'in', 'sorted', '(', 'accepted_hits', '[', 'chrom', ']', ')', ':', 'print', '(', 'chrom', ',', 'position', ',', "'.'", ',', 'accepted_hits', '[', 'chrom', ']', '[', 'position', ']', '[', '0', ']', ',', 'accepted_hits', '[', 'chrom', ']', '[', 'position', ']', '[', '1', ']', ',', "'.'", ',', "'PASS'", ',', "'callers='", '+', 'accepted_hits', '[', 'chrom', ']', '[', 'position', ']', '[', '2', ']', ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'outfile', ')', 'fsid', '=', 'job', '.', 'fileStore', '.', 'writeGlobalFile', '(', 'outfile', '.', 'name', ')', 'export_results', '(', 'job', ',', 'fsid', ',', 'outfile', '.', 'name', ',', 'univ_options', ',', 'subfolder', '=', "'mutations/merged'", ')', 'return', 'fsid'] | Merge the mutation calls for a single chromosome.
:param str chrom: Chromosome to process
:param dict mutations: dict of dicts of the various mutation caller names as keys, and a dict of
per chromosome job store ids for vcfs as value
:param dict univ_options: Dict of universal options used by almost all tools
:returns fsID for vcf containing merged calls for the given chromosome
:rtype: toil.fileStore.FileID | ['Merge', 'the', 'mutation', 'calls', 'for', 'a', 'single', 'chromosome', '.'] | train | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L74-L143 |
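A minimal sketch of how the merge_perchrom_mutations job above might be wired into a Toil workflow; the parent job, chromosome name, and input dicts are assumed for illustration and are not part of this row.

# Hypothetical scheduling from a Toil parent job; 'chr1', mutations and univ_options are placeholders.
merged_vcf_id = parent_job.addChildJobFn(merge_perchrom_mutations, 'chr1', mutations, univ_options).rv()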
8,635 | sharibarboza/py_zap | py_zap/utils.py | convert_month | def convert_month(date, shorten=True, cable=True):
"""Replace month by shortening or lengthening it.
:param shorten: Set to True to shorten month name.
:param cable: Set to True if category is Cable.
"""
month = date.split()[0].lower()
if 'sept' in month:
shorten = False if cable else True
try:
if shorten:
month = SHORT_MONTHS[MONTHS.index(month)]
else:
month = MONTHS[SHORT_MONTHS.index(month)]
except ValueError:
month = month.title()
return '{0} {1}'.format(month, ' '.join(date.split()[1:])) | python | def convert_month(date, shorten=True, cable=True):
"""Replace month by shortening or lengthening it.
:param shorten: Set to True to shorten month name.
:param cable: Set to True if category is Cable.
"""
month = date.split()[0].lower()
if 'sept' in month:
shorten = False if cable else True
try:
if shorten:
month = SHORT_MONTHS[MONTHS.index(month)]
else:
month = MONTHS[SHORT_MONTHS.index(month)]
except ValueError:
month = month.title()
return '{0} {1}'.format(month, ' '.join(date.split()[1:])) | ['def', 'convert_month', '(', 'date', ',', 'shorten', '=', 'True', ',', 'cable', '=', 'True', ')', ':', 'month', '=', 'date', '.', 'split', '(', ')', '[', '0', ']', '.', 'lower', '(', ')', 'if', "'sept'", 'in', 'month', ':', 'shorten', '=', 'False', 'if', 'cable', 'else', 'True', 'try', ':', 'if', 'shorten', ':', 'month', '=', 'SHORT_MONTHS', '[', 'MONTHS', '.', 'index', '(', 'month', ')', ']', 'else', ':', 'month', '=', 'MONTHS', '[', 'SHORT_MONTHS', '.', 'index', '(', 'month', ')', ']', 'except', 'ValueError', ':', 'month', '=', 'month', '.', 'title', '(', ')', 'return', "'{0} {1}'", '.', 'format', '(', 'month', ',', "' '", '.', 'join', '(', 'date', '.', 'split', '(', ')', '[', '1', ':', ']', ')', ')'] | Replace month by shortening or lengthening it.
:param shorten: Set to True to shorten month name.
:param cable: Set to True if category is Cable. | ['Replace', 'month', 'by', 'shortening', 'or', 'lengthening', 'it', '.'] | train | https://github.com/sharibarboza/py_zap/blob/ce90853efcad66d3e28b8f1ac910f275349d016c/py_zap/utils.py#L71-L89 |
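A minimal usage sketch for convert_month; the date strings are made up and the exact output depends on the module-level MONTHS and SHORT_MONTHS tables, which are not shown in this row.

# Shorten a spelled-out month (falls back to title-casing if the month is not in the tables).
shortened = convert_month('january 15, 2017', shorten=True)
# Expand an abbreviated month instead.
lengthened = convert_month('jan. 15, 2017', shorten=False)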
8,636 | mfcloud/python-zvm-sdk | smtLayer/powerVM.py | softDeactivate | def softDeactivate(rh):
"""
Deactivate a virtual machine by first shutting down Linux and
then logging it off.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'SOFTOFF'
userid - userid of the virtual machine
parms['maxQueries'] - Maximum number of queries to issue.
Optional.
parms['maxWait'] - Maximum time to wait in seconds.
Optional,
unless 'maxQueries' is specified.
parms['poll'] - Polling interval in seconds. Optional,
unless 'maxQueries' is specified.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter powerVM.softDeactivate, userid: " +
rh.userid)
strCmd = "echo 'ping'"
iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
if iucvResults['overallRC'] == 0:
# We could talk to the machine, tell it to shutdown nicely.
strCmd = "shutdown -h now"
iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
if iucvResults['overallRC'] == 0:
time.sleep(15)
else:
# Shutdown failed. Let CP take down the system
# after we log the results.
rh.printSysLog("powerVM.softDeactivate " + rh.userid +
" is unreachable. Treating it as already shutdown.")
else:
# Could not ping the machine. Treat it as a success
# after we log the results.
rh.printSysLog("powerVM.softDeactivate " + rh.userid +
" is unreachable. Treating it as already shutdown.")
# Tell z/VM to log off the system.
parms = ["-T", rh.userid]
smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms)
if smcliResults['overallRC'] == 0:
pass
elif (smcliResults['overallRC'] == 8 and smcliResults['rc'] == 200 and
(smcliResults['rs'] == 12 or + smcliResults['rs'] == 16)):
# Tolerable error.
# Machine is already logged off or is logging off.
rh.printLn("N", rh.userid + " is already logged off.")
else:
# SMAPI API failed.
rh.printLn("ES", smcliResults['response'])
rh.updateResults(smcliResults) # Use results from invokeSMCLI
if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
# Wait for the system to log off.
waitResults = waitForVMState(
rh,
rh.userid,
'off',
maxQueries=rh.parms['maxQueries'],
sleepSecs=rh.parms['poll'])
if waitResults['overallRC'] == 0:
rh.printLn("N", "Userid '" + rh.userid +
" is in the desired state: off")
else:
rh.updateResults(waitResults)
rh.printSysLog("Exit powerVM.softDeactivate, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | python | def softDeactivate(rh):
"""
Deactivate a virtual machine by first shutting down Linux and
then logging it off.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'SOFTOFF'
userid - userid of the virtual machine
parms['maxQueries'] - Maximum number of queries to issue.
Optional.
parms['maxWait'] - Maximum time to wait in seconds.
Optional,
unless 'maxQueries' is specified.
parms['poll'] - Polling interval in seconds. Optional,
unless 'maxQueries' is specified.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter powerVM.softDeactivate, userid: " +
rh.userid)
strCmd = "echo 'ping'"
iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
if iucvResults['overallRC'] == 0:
# We could talk to the machine, tell it to shutdown nicely.
strCmd = "shutdown -h now"
iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
if iucvResults['overallRC'] == 0:
time.sleep(15)
else:
# Shutdown failed. Let CP take down the system
# after we log the results.
rh.printSysLog("powerVM.softDeactivate " + rh.userid +
" is unreachable. Treating it as already shutdown.")
else:
# Could not ping the machine. Treat it as a success
# after we log the results.
rh.printSysLog("powerVM.softDeactivate " + rh.userid +
" is unreachable. Treating it as already shutdown.")
# Tell z/VM to log off the system.
parms = ["-T", rh.userid]
smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms)
if smcliResults['overallRC'] == 0:
pass
elif (smcliResults['overallRC'] == 8 and smcliResults['rc'] == 200 and
(smcliResults['rs'] == 12 or + smcliResults['rs'] == 16)):
# Tolerable error.
# Machine is already logged off or is logging off.
rh.printLn("N", rh.userid + " is already logged off.")
else:
# SMAPI API failed.
rh.printLn("ES", smcliResults['response'])
rh.updateResults(smcliResults) # Use results from invokeSMCLI
if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
# Wait for the system to log off.
waitResults = waitForVMState(
rh,
rh.userid,
'off',
maxQueries=rh.parms['maxQueries'],
sleepSecs=rh.parms['poll'])
if waitResults['overallRC'] == 0:
rh.printLn("N", "Userid '" + rh.userid +
" is in the desired state: off")
else:
rh.updateResults(waitResults)
rh.printSysLog("Exit powerVM.softDeactivate, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | ['def', 'softDeactivate', '(', 'rh', ')', ':', 'rh', '.', 'printSysLog', '(', '"Enter powerVM.softDeactivate, userid: "', '+', 'rh', '.', 'userid', ')', 'strCmd', '=', '"echo \'ping\'"', 'iucvResults', '=', 'execCmdThruIUCV', '(', 'rh', ',', 'rh', '.', 'userid', ',', 'strCmd', ')', 'if', 'iucvResults', '[', "'overallRC'", ']', '==', '0', ':', '# We could talk to the machine, tell it to shutdown nicely.', 'strCmd', '=', '"shutdown -h now"', 'iucvResults', '=', 'execCmdThruIUCV', '(', 'rh', ',', 'rh', '.', 'userid', ',', 'strCmd', ')', 'if', 'iucvResults', '[', "'overallRC'", ']', '==', '0', ':', 'time', '.', 'sleep', '(', '15', ')', 'else', ':', '# Shutdown failed. Let CP take down the system', '# after we log the results.', 'rh', '.', 'printSysLog', '(', '"powerVM.softDeactivate "', '+', 'rh', '.', 'userid', '+', '" is unreachable. Treating it as already shutdown."', ')', 'else', ':', '# Could not ping the machine. Treat it as a success', '# after we log the results.', 'rh', '.', 'printSysLog', '(', '"powerVM.softDeactivate "', '+', 'rh', '.', 'userid', '+', '" is unreachable. Treating it as already shutdown."', ')', '# Tell z/VM to log off the system.', 'parms', '=', '[', '"-T"', ',', 'rh', '.', 'userid', ']', 'smcliResults', '=', 'invokeSMCLI', '(', 'rh', ',', '"Image_Deactivate"', ',', 'parms', ')', 'if', 'smcliResults', '[', "'overallRC'", ']', '==', '0', ':', 'pass', 'elif', '(', 'smcliResults', '[', "'overallRC'", ']', '==', '8', 'and', 'smcliResults', '[', "'rc'", ']', '==', '200', 'and', '(', 'smcliResults', '[', "'rs'", ']', '==', '12', 'or', '+', 'smcliResults', '[', "'rs'", ']', '==', '16', ')', ')', ':', '# Tolerable error.', '# Machine is already logged off or is logging off.', 'rh', '.', 'printLn', '(', '"N"', ',', 'rh', '.', 'userid', '+', '" is already logged off."', ')', 'else', ':', '# SMAPI API failed.', 'rh', '.', 'printLn', '(', '"ES"', ',', 'smcliResults', '[', "'response'", ']', ')', 'rh', '.', 'updateResults', '(', 'smcliResults', ')', '# Use results from invokeSMCLI', 'if', 'rh', '.', 'results', '[', "'overallRC'", ']', '==', '0', 'and', "'maxQueries'", 'in', 'rh', '.', 'parms', ':', '# Wait for the system to log off.', 'waitResults', '=', 'waitForVMState', '(', 'rh', ',', 'rh', '.', 'userid', ',', "'off'", ',', 'maxQueries', '=', 'rh', '.', 'parms', '[', "'maxQueries'", ']', ',', 'sleepSecs', '=', 'rh', '.', 'parms', '[', "'poll'", ']', ')', 'if', 'waitResults', '[', "'overallRC'", ']', '==', '0', ':', 'rh', '.', 'printLn', '(', '"N"', ',', '"Userid \'"', '+', 'rh', '.', 'userid', '+', '" is in the desired state: off"', ')', 'else', ':', 'rh', '.', 'updateResults', '(', 'waitResults', ')', 'rh', '.', 'printSysLog', '(', '"Exit powerVM.softDeactivate, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']'] | Deactivate a virtual machine by first shutting down Linux and
then logging it off.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'SOFTOFF'
userid - userid of the virtual machine
parms['maxQueries'] - Maximum number of queries to issue.
Optional.
parms['maxWait'] - Maximum time to wait in seconds.
Optional,
unless 'maxQueries' is specified.
parms['poll'] - Polling interval in seconds. Optional,
unless 'maxQueries' is specified.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | ['Deactivate', 'a', 'virtual', 'machine', 'by', 'first', 'shutting', 'down', 'Linux', 'and', 'then', 'log', 'it', 'off', '.'] | train | https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/powerVM.py#L743-L820 |
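A hedged sketch of calling softDeactivate; the request-handle attributes follow the docstring above, but how the handle is constructed is assumed, not shown in this row.

# Hypothetical request handle setup; values are placeholders.
rh.function = 'POWERVM'
rh.subfunction = 'SOFTOFF'
rh.userid = 'LINUX01'
rh.parms = {'maxQueries': 10, 'poll': 15}
rc = softDeactivate(rh)  # 0 on success, non-zero on error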
8,637 | GoogleCloudPlatform/compute-image-packages | packages/python-google-compute-engine/google_compute_engine/metadata_watcher.py | MetadataWatcher._GetMetadataRequest | def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):
"""Performs a GET request with the metadata headers.
Args:
metadata_url: string, the URL to perform a GET request on.
params: dictionary, the query parameters in the GET request.
timeout: int, timeout in seconds for metadata requests.
Returns:
HTTP response from the GET request.
Raises:
urlerror.HTTPError: raises when the GET request fails.
"""
headers = {'Metadata-Flavor': 'Google'}
params = urlparse.urlencode(params or {})
url = '%s?%s' % (metadata_url, params)
request = urlrequest.Request(url, headers=headers)
request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
timeout = timeout or self.timeout
return request_opener.open(request, timeout=timeout*1.1) | python | def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):
"""Performs a GET request with the metadata headers.
Args:
metadata_url: string, the URL to perform a GET request on.
params: dictionary, the query parameters in the GET request.
timeout: int, timeout in seconds for metadata requests.
Returns:
HTTP response from the GET request.
Raises:
urlerror.HTTPError: raises when the GET request fails.
"""
headers = {'Metadata-Flavor': 'Google'}
params = urlparse.urlencode(params or {})
url = '%s?%s' % (metadata_url, params)
request = urlrequest.Request(url, headers=headers)
request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
timeout = timeout or self.timeout
return request_opener.open(request, timeout=timeout*1.1) | ['def', '_GetMetadataRequest', '(', 'self', ',', 'metadata_url', ',', 'params', '=', 'None', ',', 'timeout', '=', 'None', ')', ':', 'headers', '=', '{', "'Metadata-Flavor'", ':', "'Google'", '}', 'params', '=', 'urlparse', '.', 'urlencode', '(', 'params', 'or', '{', '}', ')', 'url', '=', "'%s?%s'", '%', '(', 'metadata_url', ',', 'params', ')', 'request', '=', 'urlrequest', '.', 'Request', '(', 'url', ',', 'headers', '=', 'headers', ')', 'request_opener', '=', 'urlrequest', '.', 'build_opener', '(', 'urlrequest', '.', 'ProxyHandler', '(', '{', '}', ')', ')', 'timeout', '=', 'timeout', 'or', 'self', '.', 'timeout', 'return', 'request_opener', '.', 'open', '(', 'request', ',', 'timeout', '=', 'timeout', '*', '1.1', ')'] | Performs a GET request with the metadata headers.
Args:
metadata_url: string, the URL to perform a GET request on.
params: dictionary, the query parameters in the GET request.
timeout: int, timeout in seconds for metadata requests.
Returns:
HTTP response from the GET request.
Raises:
urlerror.HTTPError: raises when the GET request fails. | ['Performs', 'a', 'GET', 'request', 'with', 'the', 'metadata', 'headers', '.'] | train | https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/metadata_watcher.py#L82-L102 |
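An illustrative internal call, assuming watcher is a MetadataWatcher instance; the endpoint is the standard GCE metadata URL and the query parameter is only an example.

# Hypothetical call from inside the guest agent; _GetMetadataRequest is a private helper.
url = 'http://metadata.google.internal/computeMetadata/v1/instance/hostname'
response = watcher._GetMetadataRequest(url, params={'alt': 'text'}, timeout=5)
print(response.read())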
8,638 | avalente/appmetrics | appmetrics/metrics.py | new_histogram_with_implicit_reservoir | def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs):
"""
Build a new histogram metric and a reservoir from the given parameters
"""
reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs)
return new_histogram(name, reservoir) | python | def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs):
"""
Build a new histogram metric and a reservoir from the given parameters
"""
reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs)
return new_histogram(name, reservoir) | ['def', 'new_histogram_with_implicit_reservoir', '(', 'name', ',', 'reservoir_type', '=', "'uniform'", ',', '*', 'reservoir_args', ',', '*', '*', 'reservoir_kwargs', ')', ':', 'reservoir', '=', 'new_reservoir', '(', 'reservoir_type', ',', '*', 'reservoir_args', ',', '*', '*', 'reservoir_kwargs', ')', 'return', 'new_histogram', '(', 'name', ',', 'reservoir', ')'] | Build a new histogram metric and a reservoir from the given parameters | ['Build', 'a', 'new', 'histogram', 'metric', 'and', 'a', 'reservoir', 'from', 'the', 'given', 'parameters'] | train | https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L142-L148 |
8,639 | artefactual-labs/agentarchives | agentarchives/archivists_toolkit/client.py | ArchivistsToolkitClient.get_resource_component_and_children | def get_resource_component_and_children(
self, resource_id, resource_type="collection", level=1, sort_data={}, **kwargs
):
"""
Fetch detailed metadata for the specified resource_id and all of its children.
:param long resource_id: The resource for which to fetch metadata.
:param string resource_type: The level of description of the record.
:param int recurse_max_level: The maximum depth level to fetch when fetching children.
Default is to fetch all of the resource's children, descending as deeply as necessary.
Pass 1 to fetch no children.
:param string search_pattern: If specified, limits fetched children to those whose titles or IDs match the provided query.
See ArchivistsToolkitClient.find_collection_ids for documentation of the query format.
:return: A dict containing detailed metadata about both the requested resource and its children.
The dict follows this format:
{
'id': '31',
'type': 'resource',
'sortPosition': '1',
'identifier': 'PR01',
'title': 'Parent',
'levelOfDescription': 'collection',
'dates': '1880-1889',
'date_expression': '1880 to 1889',
'notes': [
'type': 'odd',
'content': 'This is a note',
],
'children': [{
'id': '23',
'type': 'resource_component',
'sortPosition': '2',
'identifier': 'CH01',
'title': 'Child A',
'levelOfDescription': 'Sousfonds',
'dates': '1880-1888',
'date_expression': '1880 to 1888',
'notes': [],
'children': [{
'id': '24',
'type': 'resource_component',
'sortPosition': '3',
'identifier': 'GR01',
'title': 'Grandchild A',
'levelOfDescription': 'Item',
'dates': '1880-1888',
'date_expression': '1880 to 1888',
'notes': [],
'children': False
},
{
'id': '25',
'type': 'resource_component',
'sortPosition': '4',
'identifier': 'GR02',
'title': 'Grandchild B',
'levelOfDescription': 'Item',
'notes': [],
'children': False
}]
},
{
'id': '26',
'type': 'resource_component',
'sortPosition': '5',
'identifier': 'CH02',
'title': 'Child B',
'levelOfDescription': 'Sousfonds',
'dates': '1889',
'date_expression': '1889',
'notes': [],
'children': False
}]
}
:rtype list:
"""
# we pass the sort position as a dict so it passes by reference and we
# can use it to share state during recursion
recurse_max_level = kwargs.get("recurse_max_level", False)
query = kwargs.get("search_pattern", "")
# initialize sort position if this is the beginning of recursion
if level == 1:
sort_data["position"] = 0
sort_data["position"] = sort_data["position"] + 1
resource_data = {}
cursor = self.db.cursor()
if resource_type == "collection":
cursor.execute(
"SELECT title, dateExpression, resourceIdentifier1, resourceLevel FROM Resources WHERE resourceid=%s",
(resource_id),
)
for row in cursor.fetchall():
resource_data["id"] = resource_id
resource_data["type"] = "resource"
resource_data["sortPosition"] = sort_data["position"]
resource_data["title"] = row[0]
# TODO reformat dates from the separate date fields, like ArchivesSpaceClient?
resource_data["dates"] = row[1]
resource_data["date_expression"] = row[1]
resource_data["identifier"] = row[2]
resource_data["levelOfDescription"] = row[3]
else:
cursor.execute(
"SELECT title, dateExpression, persistentID, resourceLevel FROM ResourcesComponents WHERE resourceComponentId=%s",
(resource_id),
)
for row in cursor.fetchall():
resource_data["id"] = resource_id
resource_data["type"] = "resource_component"
resource_data["sortPosition"] = sort_data["position"]
resource_data["title"] = row[0]
resource_data["dates"] = row[1]
resource_data["date_expression"] = row[1]
resource_data["identifier"] = row[2]
resource_data["levelOfDescription"] = row[3]
# fetch children if we haven't reached the maximum recursion level
if resource_type == "collection":
if query == "":
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId IS NULL AND resourceId=%s ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id),
)
else:
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId IS NULL AND resourceId=%s AND (title LIKE %s OR persistentID LIKE %s) ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id, "%" + query + "%", "%" + query + "%"),
)
else:
if query == "":
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId=%s ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id),
)
else:
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId=%s AND (title LIKE %s OR persistentID LIKE %s) ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id, "%" + query + "%", "%" + query + "%"),
)
rows = cursor.fetchall()
if (not recurse_max_level) or level < recurse_max_level:
if len(rows):
resource_data["children"] = []
resource_data["has_children"] = True
for row in rows:
resource_data["children"].append(
self.get_resource_component_and_children(
row[0], "description", level + 1, sort_data
)
)
else:
if len(rows):
resource_data["children"] = []
resource_data["has_children"] = True
else:
resource_data["children"] = False
resource_data["has_children"] = False
# TODO: implement fetching notes
resource_data["notes"] = []
return resource_data | python | def get_resource_component_and_children(
self, resource_id, resource_type="collection", level=1, sort_data={}, **kwargs
):
"""
Fetch detailed metadata for the specified resource_id and all of its children.
:param long resource_id: The resource for which to fetch metadata.
:param string resource_type: The level of description of the record.
:param int recurse_max_level: The maximum depth level to fetch when fetching children.
Default is to fetch all of the resource's children, descending as deeply as necessary.
Pass 1 to fetch no children.
:param string search_pattern: If specified, limits fetched children to those whose titles or IDs match the provided query.
See ArchivistsToolkitClient.find_collection_ids for documentation of the query format.
:return: A dict containing detailed metadata about both the requested resource and its children.
The dict follows this format:
{
'id': '31',
'type': 'resource',
'sortPosition': '1',
'identifier': 'PR01',
'title': 'Parent',
'levelOfDescription': 'collection',
'dates': '1880-1889',
'date_expression': '1880 to 1889',
'notes': [
'type': 'odd',
'content': 'This is a note',
],
'children': [{
'id': '23',
'type': 'resource_component',
'sortPosition': '2',
'identifier': 'CH01',
'title': 'Child A',
'levelOfDescription': 'Sousfonds',
'dates': '1880-1888',
'date_expression': '1880 to 1888',
'notes': [],
'children': [{
'id': '24',
'type': 'resource_component',
'sortPosition': '3',
'identifier': 'GR01',
'title': 'Grandchild A',
'levelOfDescription': 'Item',
'dates': '1880-1888',
'date_expression': '1880 to 1888',
'notes': [],
'children': False
},
{
'id': '25',
'type': 'resource_component',
'sortPosition': '4',
'identifier': 'GR02',
'title': 'Grandchild B',
'levelOfDescription': 'Item',
'notes': [],
'children': False
}]
},
{
'id': '26',
'type': 'resource_component',
'sortPosition': '5',
'identifier': 'CH02',
'title': 'Child B',
'levelOfDescription': 'Sousfonds',
'dates': '1889',
'date_expression': '1889',
'notes': [],
'children': False
}]
}
:rtype list:
"""
# we pass the sort position as a dict so it passes by reference and we
# can use it to share state during recursion
recurse_max_level = kwargs.get("recurse_max_level", False)
query = kwargs.get("search_pattern", "")
# initialize sort position if this is the beginning of recursion
if level == 1:
sort_data["position"] = 0
sort_data["position"] = sort_data["position"] + 1
resource_data = {}
cursor = self.db.cursor()
if resource_type == "collection":
cursor.execute(
"SELECT title, dateExpression, resourceIdentifier1, resourceLevel FROM Resources WHERE resourceid=%s",
(resource_id),
)
for row in cursor.fetchall():
resource_data["id"] = resource_id
resource_data["type"] = "resource"
resource_data["sortPosition"] = sort_data["position"]
resource_data["title"] = row[0]
# TODO reformat dates from the separate date fields, like ArchivesSpaceClient?
resource_data["dates"] = row[1]
resource_data["date_expression"] = row[1]
resource_data["identifier"] = row[2]
resource_data["levelOfDescription"] = row[3]
else:
cursor.execute(
"SELECT title, dateExpression, persistentID, resourceLevel FROM ResourcesComponents WHERE resourceComponentId=%s",
(resource_id),
)
for row in cursor.fetchall():
resource_data["id"] = resource_id
resource_data["type"] = "resource_component"
resource_data["sortPosition"] = sort_data["position"]
resource_data["title"] = row[0]
resource_data["dates"] = row[1]
resource_data["date_expression"] = row[1]
resource_data["identifier"] = row[2]
resource_data["levelOfDescription"] = row[3]
# fetch children if we haven't reached the maximum recursion level
if resource_type == "collection":
if query == "":
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId IS NULL AND resourceId=%s ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id),
)
else:
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId IS NULL AND resourceId=%s AND (title LIKE %s OR persistentID LIKE %s) ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id, "%" + query + "%", "%" + query + "%"),
)
else:
if query == "":
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId=%s ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id),
)
else:
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId=%s AND (title LIKE %s OR persistentID LIKE %s) ORDER BY FIND_IN_SET(resourceLevel, 'subseries,file'), title ASC",
(resource_id, "%" + query + "%", "%" + query + "%"),
)
rows = cursor.fetchall()
if (not recurse_max_level) or level < recurse_max_level:
if len(rows):
resource_data["children"] = []
resource_data["has_children"] = True
for row in rows:
resource_data["children"].append(
self.get_resource_component_and_children(
row[0], "description", level + 1, sort_data
)
)
else:
if len(rows):
resource_data["children"] = []
resource_data["has_children"] = True
else:
resource_data["children"] = False
resource_data["has_children"] = False
# TODO: implement fetching notes
resource_data["notes"] = []
return resource_data | ['def', 'get_resource_component_and_children', '(', 'self', ',', 'resource_id', ',', 'resource_type', '=', '"collection"', ',', 'level', '=', '1', ',', 'sort_data', '=', '{', '}', ',', '*', '*', 'kwargs', ')', ':', '# we pass the sort position as a dict so it passes by reference and we', '# can use it to share state during recursion', 'recurse_max_level', '=', 'kwargs', '.', 'get', '(', '"recurse_max_level"', ',', 'False', ')', 'query', '=', 'kwargs', '.', 'get', '(', '"search_pattern"', ',', '""', ')', '# intialize sort position if this is the beginning of recursion', 'if', 'level', '==', '1', ':', 'sort_data', '[', '"position"', ']', '=', '0', 'sort_data', '[', '"position"', ']', '=', 'sort_data', '[', '"position"', ']', '+', '1', 'resource_data', '=', '{', '}', 'cursor', '=', 'self', '.', 'db', '.', 'cursor', '(', ')', 'if', 'resource_type', '==', '"collection"', ':', 'cursor', '.', 'execute', '(', '"SELECT title, dateExpression, resourceIdentifier1, resourceLevel FROM Resources WHERE resourceid=%s"', ',', '(', 'resource_id', ')', ',', ')', 'for', 'row', 'in', 'cursor', '.', 'fetchall', '(', ')', ':', 'resource_data', '[', '"id"', ']', '=', 'resource_id', 'resource_data', '[', '"type"', ']', '=', '"resource"', 'resource_data', '[', '"sortPosition"', ']', '=', 'sort_data', '[', '"position"', ']', 'resource_data', '[', '"title"', ']', '=', 'row', '[', '0', ']', '# TODO reformat dates from the separate date fields, like ArchivesSpaceClient?', 'resource_data', '[', '"dates"', ']', '=', 'row', '[', '1', ']', 'resource_data', '[', '"date_expression"', ']', '=', 'row', '[', '1', ']', 'resource_data', '[', '"identifier"', ']', '=', 'row', '[', '2', ']', 'resource_data', '[', '"levelOfDescription"', ']', '=', 'row', '[', '3', ']', 'else', ':', 'cursor', '.', 'execute', '(', '"SELECT title, dateExpression, persistentID, resourceLevel FROM ResourcesComponents WHERE resourceComponentId=%s"', ',', '(', 'resource_id', ')', ',', ')', 'for', 'row', 'in', 'cursor', '.', 'fetchall', '(', ')', ':', 'resource_data', '[', '"id"', ']', '=', 'resource_id', 'resource_data', '[', '"type"', ']', '=', '"resource_component"', 'resource_data', '[', '"sortPosition"', ']', '=', 'sort_data', '[', '"position"', ']', 'resource_data', '[', '"title"', ']', '=', 'row', '[', '0', ']', 'resource_data', '[', '"dates"', ']', '=', 'row', '[', '1', ']', 'resource_data', '[', '"date_expression"', ']', '=', 'row', '[', '1', ']', 'resource_data', '[', '"identifier"', ']', '=', 'row', '[', '2', ']', 'resource_data', '[', '"levelOfDescription"', ']', '=', 'row', '[', '3', ']', "# fetch children if we haven't reached the maximum recursion level", 'if', 'resource_type', '==', '"collection"', ':', 'if', 'query', '==', '""', ':', 'cursor', '.', 'execute', '(', '"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId IS NULL AND resourceId=%s ORDER BY FIND_IN_SET(resourceLevel, \'subseries,file\'), title ASC"', ',', '(', 'resource_id', ')', ',', ')', 'else', ':', 'cursor', '.', 'execute', '(', '"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId IS NULL AND resourceId=%s AND (title LIKE %s OR persistentID LIKE %s) ORDER BY FIND_IN_SET(resourceLevel, \'subseries,file\'), title ASC"', ',', '(', 'resource_id', ',', '"%"', '+', 'query', '+', '"%"', ',', '"%"', '+', 'query', '+', '"%"', ')', ',', ')', 'else', ':', 'if', 'query', '==', '""', ':', 'cursor', '.', 'execute', '(', '"SELECT resourceComponentId FROM ResourcesComponents WHERE 
parentResourceComponentId=%s ORDER BY FIND_IN_SET(resourceLevel, \'subseries,file\'), title ASC"', ',', '(', 'resource_id', ')', ',', ')', 'else', ':', 'cursor', '.', 'execute', '(', '"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId=%s AND (title LIKE %s OR persistentID LIKE %s) ORDER BY FIND_IN_SET(resourceLevel, \'subseries,file\'), title ASC"', ',', '(', 'resource_id', ',', '"%"', '+', 'query', '+', '"%"', ',', '"%"', '+', 'query', '+', '"%"', ')', ',', ')', 'rows', '=', 'cursor', '.', 'fetchall', '(', ')', 'if', '(', 'not', 'recurse_max_level', ')', 'or', 'level', '<', 'recurse_max_level', ':', 'if', 'len', '(', 'rows', ')', ':', 'resource_data', '[', '"children"', ']', '=', '[', ']', 'resource_data', '[', '"has_children"', ']', '=', 'True', 'for', 'row', 'in', 'rows', ':', 'resource_data', '[', '"children"', ']', '.', 'append', '(', 'self', '.', 'get_resource_component_and_children', '(', 'row', '[', '0', ']', ',', '"description"', ',', 'level', '+', '1', ',', 'sort_data', ')', ')', 'else', ':', 'if', 'len', '(', 'rows', ')', ':', 'resource_data', '[', '"children"', ']', '=', '[', ']', 'resource_data', '[', '"has_children"', ']', '=', 'True', 'else', ':', 'resource_data', '[', '"children"', ']', '=', 'False', 'resource_data', '[', '"has_children"', ']', '=', 'False', '# TODO: implement fetching notes', 'resource_data', '[', '"notes"', ']', '=', '[', ']', 'return', 'resource_data'] | Fetch detailed metadata for the specified resource_id and all of its children.
:param long resource_id: The resource for which to fetch metadata.
:param string resource_type: The level of description of the record.
:param int recurse_max_level: The maximum depth level to fetch when fetching children.
Default is to fetch all of the resource's children, descending as deeply as necessary.
Pass 1 to fetch no children.
:param string search_pattern: If specified, limits fetched children to those whose titles or IDs match the provided query.
See ArchivistsToolkitClient.find_collection_ids for documentation of the query format.
:return: A dict containing detailed metadata about both the requested resource and its children.
The dict follows this format:
{
'id': '31',
'type': 'resource',
'sortPosition': '1',
'identifier': 'PR01',
'title': 'Parent',
'levelOfDescription': 'collection',
'dates': '1880-1889',
'date_expression': '1880 to 1889',
'notes': [
'type': 'odd',
'content': 'This is a note',
],
'children': [{
'id': '23',
'type': 'resource_component',
'sortPosition': '2',
'identifier': 'CH01',
'title': 'Child A',
'levelOfDescription': 'Sousfonds',
'dates': '1880-1888',
'date_expression': '1880 to 1888',
'notes': [],
'children': [{
'id': '24',
'type': 'resource_component',
'sortPosition': '3',
'identifier': 'GR01',
'title': 'Grandchild A',
'levelOfDescription': 'Item',
'dates': '1880-1888',
'date_expression': '1880 to 1888',
'notes': [],
'children': False
},
{
'id': '25',
'type': 'resource_component',
'sortPosition': '4',
'identifier': 'GR02',
'title': 'Grandchild B',
'levelOfDescription': 'Item',
'notes': [],
'children': False
}]
},
{
'id': '26',
'type': 'resource_component',
'sortPosition': '5',
'identifier': 'CH02',
'title': 'Child B',
'levelOfDescription': 'Sousfonds',
'dates': '1889',
'date_expression': '1889',
'notes': [],
'children': False
}]
}
:rtype list: | ['Fetch', 'detailed', 'metadata', 'for', 'the', 'specified', 'resource_id', 'and', 'all', 'of', 'its', 'children', '.'] | train | https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivists_toolkit/client.py#L159-L332 |
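A hypothetical call against an already-connected ArchivistsToolkitClient; the resource id, recursion depth, and search pattern are illustrative.

tree = client.get_resource_component_and_children(31, resource_type='collection',
                                                  recurse_max_level=2, search_pattern='Child')
for child in tree.get('children') or []:  # 'children' may be False when nothing matches
    print(child['identifier'], child['title'])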
8,640 | OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.expand_countryname_abbrevs | def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates | python | def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates | ['def', 'expand_countryname_abbrevs', '(', 'cls', ',', 'country', ')', ':', '# type: (str) -> List[str]', 'def', 'replace_ensure_space', '(', 'word', ',', 'replace', ',', 'replacement', ')', ':', 'return', 'word', '.', 'replace', '(', 'replace', ',', "'%s '", '%', 'replacement', ')', '.', 'replace', '(', "' '", ',', "' '", ')', '.', 'strip', '(', ')', 'countryupper', '=', 'country', '.', 'upper', '(', ')', 'for', 'abbreviation', 'in', 'cls', '.', 'abbreviations', ':', 'countryupper', '=', 'replace_ensure_space', '(', 'countryupper', ',', 'abbreviation', ',', 'cls', '.', 'abbreviations', '[', 'abbreviation', ']', ')', 'candidates', '=', '[', 'countryupper', ']', 'for', 'abbreviation', 'in', 'cls', '.', 'multiple_abbreviations', ':', 'if', 'abbreviation', 'in', 'countryupper', ':', 'for', 'expanded', 'in', 'cls', '.', 'multiple_abbreviations', '[', 'abbreviation', ']', ':', 'candidates', '.', 'append', '(', 'replace_ensure_space', '(', 'countryupper', ',', 'abbreviation', ',', 'expanded', ')', ')', 'return', 'candidates'] | Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s) to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways | ['Expands', 'abbreviation', '(', 's', ')', 'in', 'country', 'name', 'in', 'various', 'ways', '(', 'eg', '.', 'FED', '-', '>', 'FEDERATED', 'FEDERAL', 'etc', '.', ')'] | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L386-L406 |
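An illustrative call; expand_countryname_abbrevs is a classmethod, and the exact candidate list depends on the class-level abbreviation tables, which are not part of this row.

candidates = Country.expand_countryname_abbrevs('Dem. Rep. of the Congo')
# candidates[0] is the uppercased name with the single-expansion abbreviations applied;
# any further entries come from the multi-way expansions.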
8,641 | python-gitlab/python-gitlab | gitlab/v4/objects.py | Project.languages | def languages(self, **kwargs):
"""Get languages used in the project with percentage value.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
"""
path = '/projects/%s/languages' % self.get_id()
return self.manager.gitlab.http_get(path, **kwargs) | python | def languages(self, **kwargs):
"""Get languages used in the project with percentage value.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
"""
path = '/projects/%s/languages' % self.get_id()
return self.manager.gitlab.http_get(path, **kwargs) | ['def', 'languages', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'path', '=', "'/projects/%s/languages'", '%', 'self', '.', 'get_id', '(', ')', 'return', 'self', '.', 'manager', '.', 'gitlab', '.', 'http_get', '(', 'path', ',', '*', '*', 'kwargs', ')'] | Get languages used in the project with percentage value.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request | ['Get', 'languages', 'used', 'in', 'the', 'project', 'with', 'percentage', 'value', '.'] | train | https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L3503-L3514 |
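Typical usage with python-gitlab; the server URL, token, and project path are placeholders, and the returned mapping is only an example.

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get('group/project')
print(project.languages())  # e.g. {'Python': 80.0, 'Shell': 20.0}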
8,642 | PierreRust/apigpio | apigpio/apigpio.py | Pi.script_status | def script_status(self, script_id):
"""
Returns the run status of a stored script as well as the
current values of parameters 0 to 9.
script_id:= id of stored script.
The run status may be
. .
PI_SCRIPT_INITING
PI_SCRIPT_HALTED
PI_SCRIPT_RUNNING
PI_SCRIPT_WAITING
PI_SCRIPT_FAILED
. .
The return value is a tuple of run status and a list of
the 10 parameters. On error the run status will be negative
and the parameter list will be empty.
...
(s, pars) = pi.script_status(sid)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_PROCP, script_id, 0)
bytes = u2i(res)
if bytes > 0:
# Fixme : this should be the same as _rxbuf
# data = self._rxbuf(bytes)
data = yield from self._loop.sock_recv(self.s, bytes)
while len(data) < bytes:
b = yield from self._loop.sock_recv(self.s, bytes-len(data))
data.extend(b)
pars = struct.unpack('11i', _str(data))
status = pars[0]
params = pars[1:]
else:
status = bytes
params = ()
return status, params | python | def script_status(self, script_id):
"""
Returns the run status of a stored script as well as the
current values of parameters 0 to 9.
script_id:= id of stored script.
The run status may be
. .
PI_SCRIPT_INITING
PI_SCRIPT_HALTED
PI_SCRIPT_RUNNING
PI_SCRIPT_WAITING
PI_SCRIPT_FAILED
. .
The return value is a tuple of run status and a list of
the 10 parameters. On error the run status will be negative
and the parameter list will be empty.
...
(s, pars) = pi.script_status(sid)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_PROCP, script_id, 0)
bytes = u2i(res)
if bytes > 0:
# Fixme : this should be the same as _rxbuf
# data = self._rxbuf(bytes)
data = yield from self._loop.sock_recv(self.s, bytes)
while len(data) < bytes:
b = yield from self._loop.sock_recv(self.s, bytes-len(data))
data.extend(b)
pars = struct.unpack('11i', _str(data))
status = pars[0]
params = pars[1:]
else:
status = bytes
params = ()
return status, params | ['def', 'script_status', '(', 'self', ',', 'script_id', ')', ':', 'res', '=', 'yield', 'from', 'self', '.', '_pigpio_aio_command', '(', '_PI_CMD_PROCP', ',', 'script_id', ',', '0', ')', 'bytes', '=', 'u2i', '(', 'res', ')', 'if', 'bytes', '>', '0', ':', '# Fixme : this sould be the same a _rxbuf', '# data = self._rxbuf(bytes)', 'data', '=', 'yield', 'from', 'self', '.', '_loop', '.', 'sock_recv', '(', 'self', '.', 's', ',', 'bytes', ')', 'while', 'len', '(', 'data', ')', '<', 'bytes', ':', 'b', '=', 'yield', 'from', 'self', '.', '_loop', '.', 'sock_recv', '(', 'self', '.', 's', ',', 'bytes', '-', 'len', '(', 'data', ')', ')', 'data', '.', 'extend', '(', 'b', ')', 'pars', '=', 'struct', '.', 'unpack', '(', "'11i'", ',', '_str', '(', 'data', ')', ')', 'status', '=', 'pars', '[', '0', ']', 'params', '=', 'pars', '[', '1', ':', ']', 'else', ':', 'status', '=', 'bytes', 'params', '=', '(', ')', 'return', 'status', ',', 'params'] | Returns the run status of a stored script as well as the
current values of parameters 0 to 9.
script_id:= id of stored script.
The run status may be
. .
PI_SCRIPT_INITING
PI_SCRIPT_HALTED
PI_SCRIPT_RUNNING
PI_SCRIPT_WAITING
PI_SCRIPT_FAILED
. .
The return value is a tuple of run status and a list of
the 10 parameters. On error the run status will be negative
and the parameter list will be empty.
...
(s, pars) = pi.script_status(sid)
... | ['Returns', 'the', 'run', 'status', 'of', 'a', 'stored', 'script', 'as', 'well', 'as', 'the', 'current', 'values', 'of', 'parameters', '0', 'to', '9', '.'] | train | https://github.com/PierreRust/apigpio/blob/2b969f40e06219b43a43498d8baf87f5935ceab2/apigpio/apigpio.py#L659-L702 |
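A hedged sketch in the library's yield-from coroutine style; pi is assumed to be a connected apigpio.Pi instance and sid a stored-script id obtained elsewhere.

# Inside an asyncio coroutine:
status, params = yield from pi.script_status(sid)
if status < 0:
    print('script query failed with error code', status)
else:
    print('run status', status, 'parameters', params)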
8,643 | IntegralDefense/cbinterface | cbinterface/modules/response.py | hyperLiveResponse.dump_sensor_memory | def dump_sensor_memory(self, cb_compress=False, custom_compress=False, custom_compress_file=None, auto_collect_result=False):
"""Customized function for dumping sensor memory.
:arguments cb_compress: If True, use CarbonBlack's built-in compression.
:arguments custom_compress_file: Supply path to lr_tools/compress_file.bat to fork powershell compression
:collect_mem_file: If True, wait for memdump and compression to complete, then use cbapi to collect
"""
print("~ dumping contents of memory on {}".format(self.sensor.computer_name))
local_file = remote_file = "{}.memdmp".format(self.sensor.computer_name)
if not self.lr_session:
self.go_live()
try:
if cb_compress and auto_collect_result:
logging.info("CB compression and auto-collection set")
self.lr_session.memdump(remote_filename=remote_file, compress=cb_compress)
return True
dump_object = self.lr_session.start_memdump(remote_filename=remote_file, compress=cb_compress)
dump_object.wait()
if cb_compress:
print("+ Memory dump compressed at -> C:\windows\carbonblack\{}.zip".format(remote_file))
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}.zip".format(remote_file))
return True
print("+ Memory dump complete on host -> C:\windows\carbonblack\{}".format(remote_file))
except LiveResponseError as e:
raise Exception("LiveResponseError: {}".format(e))
if custom_compress: # compress with powershell?
if not os.path.exists(custom_compress_file):
logging.debug("{} not found.".format(custom_compress_file))
HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__),'..','..'))
custom_compress_file = os.path.join(HOME_DIR, 'lr_tools', 'compress_file.bat')
if not os.path.exists(custom_compress_file):
logging.error("{} not found.".format(custom_compress_file))
return False
logging.info("Using {}".format(custom_compress_file))
bat_filename = custom_compress_file[custom_compress_file.rfind('/')+1:]
filedata = None
with open(custom_compress_file, 'rb') as f:
filedata = f.read()
try:
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
except LiveResponseError as e:
if 'ERROR_FILE_EXISTS' not in str(e):
logging.error("Error puting compress_file.bat")
return False
else:
self.lr_session.delete_file("C:\\Windows\\CarbonBlack\\" + bat_filename)
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
print("~ Launching "+ bat_filename +" to create C:\\windows\\carbonblack\\_memdump.zip")
compress_cmd = "C:\\Windows\\CarbonBlack\\" + bat_filename + " " + remote_file
self.lr_session.create_process(compress_cmd, wait_for_output=False, wait_for_completion=False)
if auto_collect_result:
print("~ waiting for {} to complete.".format(bat_filename))
self.wait_for_process_to_finish(bat_filename)
self.getFile_with_timeout("C:\\windows\\carbonblack\\_memdump.zip")
print("[!] If compression successful, _memdump.zip will exist, and {} should be deleted.".format(remote_file))
# here, they didn't want to use cb or custom compression, but they did want to auto collect results
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}".format(remote_file))
return True | python | def dump_sensor_memory(self, cb_compress=False, custom_compress=False, custom_compress_file=None, auto_collect_result=False):
"""Customized function for dumping sensor memory.
:arguments cb_compress: If True, use CarbonBlack's built-in compression.
:arguments custom_compress_file: Supply path to lr_tools/compress_file.bat to fork powershell compression
:collect_mem_file: If True, wait for memdump and compression to complete, then use cbapi to collect
"""
print("~ dumping contents of memory on {}".format(self.sensor.computer_name))
local_file = remote_file = "{}.memdmp".format(self.sensor.computer_name)
if not self.lr_session:
self.go_live()
try:
if cb_compress and auto_collect_result:
logging.info("CB compression and auto-collection set")
self.lr_session.memdump(remote_filename=remote_file, compress=cb_compress)
return True
dump_object = self.lr_session.start_memdump(remote_filename=remote_file, compress=cb_compress)
dump_object.wait()
if cb_compress:
print("+ Memory dump compressed at -> C:\windows\carbonblack\{}.zip".format(remote_file))
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}.zip".format(remote_file))
return True
print("+ Memory dump complete on host -> C:\windows\carbonblack\{}".format(remote_file))
except LiveResponseError as e:
raise Exception("LiveResponseError: {}".format(e))
if custom_compress: # compress with powershell?
if not os.path.exists(custom_compress_file):
logging.debug("{} not found.".format(custom_compress_file))
HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__),'..','..'))
custom_compress_file = os.path.join(HOME_DIR, 'lr_tools', 'compress_file.bat')
if not os.path.exists(custom_compress_file):
logging.error("{} not found.".format(custom_compress_file))
return False
logging.info("Using {}".format(custom_compress_file))
bat_filename = custom_compress_file[custom_compress_file.rfind('/')+1:]
filedata = None
with open(custom_compress_file, 'rb') as f:
filedata = f.read()
try:
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
except LiveResponseError as e:
if 'ERROR_FILE_EXISTS' not in str(e):
logging.error("Error puting compress_file.bat")
return False
else:
self.lr_session.delete_file("C:\\Windows\\CarbonBlack\\" + bat_filename)
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
print("~ Launching "+ bat_filename +" to create C:\\windows\\carbonblack\\_memdump.zip")
compress_cmd = "C:\\Windows\\CarbonBlack\\" + bat_filename + " " + remote_file
self.lr_session.create_process(compress_cmd, wait_for_output=False, wait_for_completion=False)
if auto_collect_result:
print("~ waiting for {} to complete.".format(bat_filename))
self.wait_for_process_to_finish(bat_filename)
self.getFile_with_timeout("C:\\windows\\carbonblack\\_memdump.zip")
print("[!] If compression successful, _memdump.zip will exist, and {} should be deleted.".format(remote_file))
# here, they didn't want to use cb or custom compression, but they did want to auto collect results
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}".format(remote_file))
return True | ['def', 'dump_sensor_memory', '(', 'self', ',', 'cb_compress', '=', 'False', ',', 'custom_compress', '=', 'False', ',', 'custom_compress_file', '=', 'None', ',', 'auto_collect_result', '=', 'False', ')', ':', 'print', '(', '"~ dumping contents of memory on {}"', '.', 'format', '(', 'self', '.', 'sensor', '.', 'computer_name', ')', ')', 'local_file', '=', 'remote_file', '=', '"{}.memdmp"', '.', 'format', '(', 'self', '.', 'sensor', '.', 'computer_name', ')', 'if', 'not', 'self', '.', 'lr_session', ':', 'self', '.', 'go_live', '(', ')', 'try', ':', 'if', 'cb_compress', 'and', 'auto_collect_result', ':', 'logging', '.', 'info', '(', '"CB compression and auto-collection set"', ')', 'self', '.', 'lr_session', '.', 'memdump', '(', 'remote_filename', '=', 'remote_file', ',', 'compress', '=', 'cb_compress', ')', 'return', 'True', 'dump_object', '=', 'self', '.', 'lr_session', '.', 'start_memdump', '(', 'remote_filename', '=', 'remote_file', ',', 'compress', '=', 'cb_compress', ')', 'dump_object', '.', 'wait', '(', ')', 'if', 'cb_compress', ':', 'print', '(', '"+ Memory dump compressed at -> C:\\windows\\carbonblack\\{}.zip"', '.', 'format', '(', 'remote_file', ')', ')', 'if', 'auto_collect_result', ':', 'self', '.', 'getFile_with_timeout', '(', '"C:\\\\Windows\\\\CarbonBlack\\\\{}.zip"', '.', 'format', '(', 'remote_file', ')', ')', 'return', 'True', 'print', '(', '"+ Memory dump complete on host -> C:\\windows\\carbonblack\\{}"', '.', 'format', '(', 'remote_file', ')', ')', 'except', 'LiveResponseError', 'as', 'e', ':', 'raise', 'Exception', '(', '"LiveResponseError: {}"', '.', 'format', '(', 'e', ')', ')', 'if', 'custom_compress', ':', '# compress with powershell?', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'custom_compress_file', ')', ':', 'logging', '.', 'debug', '(', '"{} not found."', '.', 'format', '(', 'custom_compress_file', ')', ')', 'HOME_DIR', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'realpath', '(', '__file__', ')', ',', "'..'", ',', "'..'", ')', ')', 'custom_compress_file', '=', 'os', '.', 'path', '.', 'join', '(', 'HOME_DIR', ',', "'lr_tools'", ',', "'compress_file.bat'", ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'custom_compress_file', ')', ':', 'logging', '.', 'error', '(', '"{} not found."', '.', 'format', '(', 'custom_compress_file', ')', ')', 'return', 'False', 'logging', '.', 'info', '(', '"Using {}"', '.', 'format', '(', 'custom_compress_file', ')', ')', 'bat_filename', '=', 'custom_compress_file', '[', 'custom_compress_file', '.', 'rfind', '(', "'/'", ')', '+', '1', ':', ']', 'filedata', '=', 'None', 'with', 'open', '(', 'custom_compress_file', ',', "'rb'", ')', 'as', 'f', ':', 'filedata', '=', 'f', '.', 'read', '(', ')', 'try', ':', 'self', '.', 'lr_session', '.', 'put_file', '(', 'filedata', ',', '"C:\\\\Windows\\\\CarbonBlack\\\\"', '+', 'bat_filename', ')', 'except', 'LiveResponseError', 'as', 'e', ':', 'if', "'ERROR_FILE_EXISTS'", 'not', 'in', 'str', '(', 'e', ')', ':', 'logging', '.', 'error', '(', '"Error puting compress_file.bat"', ')', 'return', 'False', 'else', ':', 'self', '.', 'lr_session', '.', 'delete_file', '(', '"C:\\\\Windows\\\\CarbonBlack\\\\"', '+', 'bat_filename', ')', 'self', '.', 'lr_session', '.', 'put_file', '(', 'filedata', ',', '"C:\\\\Windows\\\\CarbonBlack\\\\"', '+', 'bat_filename', ')', 'print', '(', '"~ Launching "', '+', 'bat_filename', '+', '" to create C:\\\\windows\\\\carbonblack\\\\_memdump.zip"', ')', 'compress_cmd', '=', 
'"C:\\\\Windows\\\\CarbonBlack\\\\"', '+', 'bat_filename', '+', '" "', '+', 'remote_file', 'self', '.', 'lr_session', '.', 'create_process', '(', 'compress_cmd', ',', 'wait_for_output', '=', 'False', ',', 'wait_for_completion', '=', 'False', ')', 'if', 'auto_collect_result', ':', 'print', '(', '"~ waiting for {} to complete."', '.', 'format', '(', 'bat_filename', ')', ')', 'self', '.', 'wait_for_process_to_finish', '(', 'bat_filename', ')', 'self', '.', 'getFile_with_timeout', '(', '"C:\\\\windows\\\\carbonblack\\\\_memdump.zip"', ')', 'print', '(', '"[!] If compression successful, _memdump.zip will exist, and {} should be deleted."', '.', 'format', '(', 'remote_file', ')', ')', "# here, they didn't want to use cb or custom compression, but they did want to auto collect results", 'if', 'auto_collect_result', ':', 'self', '.', 'getFile_with_timeout', '(', '"C:\\\\Windows\\\\CarbonBlack\\\\{}"', '.', 'format', '(', 'remote_file', ')', ')', 'return', 'True'] | Customized function for dumping sensor memory.
:arguments cb_compress: If True, use CarbonBlack's built-in compression.
:arguments custom_compress_file: Supply path to lr_tools/compress_file.bat to fork powershell compression
:collect_mem_file: If True, wait for memdump + and compression to complete, then use cbapi to collect | ['Customized', 'function', 'for', 'dumping', 'sensor', 'memory', '.'] | train | https://github.com/IntegralDefense/cbinterface/blob/30af06b56d723443b6fcf156756a2a20d395dd7f/cbinterface/modules/response.py#L187-L249 |
8,644 | etingof/pysnmp | pysnmp/smi/instrum.py | MibInstrumController.readNextMibObjects | def readNextMibObjects(self, *varBinds, **context):
"""Read Managed Objects Instances next to the given ones.
Given one or more py:class:`~pysnmp.smi.rfc1902.ObjectType` objects, read
all or none of the Managed Objects Instances next to the referenced ones.
Parameters
----------
varBinds: :py:class:`tuple` of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects
representing Managed Objects Instances to read next to.
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
If not provided, default function will raise exception in case
of an error.
* `acFun` (callable) - user-supplied callable that is invoked to
authorize access to the requested Managed Object Instance. If
not supplied, no access control will be performed.
Notes
-----
The signature of the callback functions (e.g. `cbFun`, `acFun`) is this:
.. code-block:: python
def cbFun(varBinds, **context):
errors = context.get('errors')
if errors:
print(errors[0].error)
else:
print(', '.join('%s = %s' % varBind for varBind in varBinds))
In case of errors, the `errors` key in the `context` dict will contain
a sequence of `dict` objects describing one or more errors that occur.
If a non-existing Managed Object is referenced, no error will be
reported, but the values returned in the `varBinds` would be one of:
:py:class:`NoSuchObject` (indicating non-existent Managed Object) or
:py:class:`NoSuchInstance` (if Managed Object exists, but is not
instantiated) or :py:class:`EndOfMibView` (when the last Managed Object
Instance has been read).
When :py:class:`NoSuchObject` or :py:class:`NoSuchInstance` values are
returned, the caller is expected to repeat the same call with some
or all `varBinds` returned to progress towards the end of the
implemented MIB.
"""
if 'cbFun' not in context:
context['cbFun'] = self._defaultErrorHandler
self.flipFlopFsm(self.FSM_READ_NEXT_VAR, *varBinds, **context) | python | def readNextMibObjects(self, *varBinds, **context):
"""Read Managed Objects Instances next to the given ones.
Given one or more py:class:`~pysnmp.smi.rfc1902.ObjectType` objects, read
all or none of the Managed Objects Instances next to the referenced ones.
Parameters
----------
varBinds: :py:class:`tuple` of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects
representing Managed Objects Instances to read next to.
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
If not provided, default function will raise exception in case
of an error.
* `acFun` (callable) - user-supplied callable that is invoked to
authorize access to the requested Managed Object Instance. If
not supplied, no access control will be performed.
Notes
-----
The signature of the callback functions (e.g. `cbFun`, `acFun`) is this:
.. code-block:: python
def cbFun(varBinds, **context):
errors = context.get('errors')
if errors:
print(errors[0].error)
else:
print(', '.join('%s = %s' % varBind for varBind in varBinds))
In case of errors, the `errors` key in the `context` dict will contain
a sequence of `dict` objects describing one or more errors that occur.
If a non-existing Managed Object is referenced, no error will be
reported, but the values returned in the `varBinds` would be one of:
:py:class:`NoSuchObject` (indicating non-existent Managed Object) or
:py:class:`NoSuchInstance` (if Managed Object exists, but is not
instantiated) or :py:class:`EndOfMibView` (when the last Managed Object
Instance has been read).
When :py:class:`NoSuchObject` or :py:class:`NoSuchInstance` values are
returned, the caller is expected to repeat the same call with some
or all `varBinds` returned to progress towards the end of the
implemented MIB.
"""
if 'cbFun' not in context:
context['cbFun'] = self._defaultErrorHandler
self.flipFlopFsm(self.FSM_READ_NEXT_VAR, *varBinds, **context) | ['def', 'readNextMibObjects', '(', 'self', ',', '*', 'varBinds', ',', '*', '*', 'context', ')', ':', 'if', "'cbFun'", 'not', 'in', 'context', ':', 'context', '[', "'cbFun'", ']', '=', 'self', '.', '_defaultErrorHandler', 'self', '.', 'flipFlopFsm', '(', 'self', '.', 'FSM_READ_NEXT_VAR', ',', '*', 'varBinds', ',', '*', '*', 'context', ')'] | Read Managed Objects Instances next to the given ones.
Given one or more py:class:`~pysnmp.smi.rfc1902.ObjectType` objects, read
all or none of the Managed Objects Instances next to the referenced ones.
Parameters
----------
varBinds: :py:class:`tuple` of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects
representing Managed Objects Instances to read next to.
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
If not provided, default function will raise exception in case
of an error.
* `acFun` (callable) - user-supplied callable that is invoked to
authorize access to the requested Managed Object Instance. If
not supplied, no access control will be performed.
Notes
-----
The signature of the callback functions (e.g. `cbFun`, `acFun`) is this:
.. code-block:: python
def cbFun(varBinds, **context):
errors = context.get('errors')
if errors:
print(errors[0].error)
else:
print(', '.join('%s = %s' % varBind for varBind in varBinds))
In case of errors, the `errors` key in the `context` dict will contain
a sequence of `dict` objects describing one or more errors that occur.
If a non-existing Managed Object is referenced, no error will be
reported, but the values returned in the `varBinds` would be one of:
:py:class:`NoSuchObject` (indicating non-existent Managed Object) or
:py:class:`NoSuchInstance` (if Managed Object exists, but is not
instantiated) or :py:class:`EndOfMibView` (when the last Managed Object
Instance has been read).
When :py:class:`NoSuchObject` or :py:class:`NoSuchInstance` values are
returned, the caller is expected to repeat the same call with some
or all `varBinds` returned to progress towards the end of the
implemented MIB. | ['Read', 'Managed', 'Objects', 'Instances', 'next', 'to', 'the', 'given', 'ones', '.'] | train | https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/instrum.py#L437-L495 |
8,645 | pycontribs/pyrax | pyrax/http.py | http_log_resp | def http_log_resp(resp, body):
"""
When pyrax.get_http_debug() is True, outputs the response received
from the API request.
"""
if not pyrax.get_http_debug():
return
log = logging.getLogger("pyrax")
log.debug("RESP: %s\n%s", resp, resp.headers)
if body:
log.debug("RESP BODY: %s", body) | python | def http_log_resp(resp, body):
"""
When pyrax.get_http_debug() is True, outputs the response received
from the API request.
"""
if not pyrax.get_http_debug():
return
log = logging.getLogger("pyrax")
log.debug("RESP: %s\n%s", resp, resp.headers)
if body:
log.debug("RESP BODY: %s", body) | ['def', 'http_log_resp', '(', 'resp', ',', 'body', ')', ':', 'if', 'not', 'pyrax', '.', 'get_http_debug', '(', ')', ':', 'return', 'log', '=', 'logging', '.', 'getLogger', '(', '"pyrax"', ')', 'log', '.', 'debug', '(', '"RESP: %s\\n%s"', ',', 'resp', ',', 'resp', '.', 'headers', ')', 'if', 'body', ':', 'log', '.', 'debug', '(', '"RESP BODY: %s"', ',', 'body', ')'] | When pyrax.get_http_debug() is True, outputs the response received
from the API request. | ['When', 'pyrax', '.', 'get_http_debug', '()', 'is', 'True', 'outputs', 'the', 'response', 'received', 'from', 'the', 'API', 'request', '.'] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/http.py#L103-L113 |
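A short usage sketch for http_log_resp above: the function only emits output when pyrax's HTTP debug flag is on, so the caller-side wiring is just a logging handler plus pyrax.set_http_debug(True). Only that wiring is shown; the rest of the pyrax session setup is assumed to happen elsewhere.
import logging
import pyrax
# Give the "pyrax" logger somewhere to send DEBUG records.
logging.basicConfig(level=logging.DEBUG)
# Makes pyrax.get_http_debug() return True, so http_log_resp() starts logging
# "RESP: ..." and "RESP BODY: ..." lines for every API response.
pyrax.set_http_debug(True)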
8,646 | python-diamond/Diamond | src/diamond/handler/graphitepickle.py | GraphitePickleHandler.get_default_config | def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphitePickleHandler, self).get_default_config()
config.update({
'port': 2004,
})
return config | python | def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphitePickleHandler, self).get_default_config()
config.update({
'port': 2004,
})
return config | ['def', 'get_default_config', '(', 'self', ')', ':', 'config', '=', 'super', '(', 'GraphitePickleHandler', ',', 'self', ')', '.', 'get_default_config', '(', ')', 'config', '.', 'update', '(', '{', "'port'", ':', '2004', ',', '}', ')', 'return', 'config'] | Return the default config for the handler | ['Return', 'the', 'default', 'config', 'for', 'the', 'handler'] | train | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/graphitepickle.py#L57-L67 |
8,647 | datastax/python-driver | cassandra/cluster.py | ResultSet.one | def one(self):
"""
Return a single row of the results or None if empty. This is basically
a shortcut to `result_set.current_rows[0]` and should only be used when
you know a query returns a single row. Consider using an iterator if the
ResultSet contains more than one row.
"""
row = None
if self._current_rows:
try:
row = self._current_rows[0]
except TypeError: # generator object is not subscriptable, PYTHON-1026
row = next(iter(self._current_rows))
return row | python | def one(self):
"""
Return a single row of the results or None if empty. This is basically
a shortcut to `result_set.current_rows[0]` and should only be used when
you know a query returns a single row. Consider using an iterator if the
ResultSet contains more than one row.
"""
row = None
if self._current_rows:
try:
row = self._current_rows[0]
except TypeError: # generator object is not subscriptable, PYTHON-1026
row = next(iter(self._current_rows))
return row | ['def', 'one', '(', 'self', ')', ':', 'row', '=', 'None', 'if', 'self', '.', '_current_rows', ':', 'try', ':', 'row', '=', 'self', '.', '_current_rows', '[', '0', ']', 'except', 'TypeError', ':', '# generator object is not subscriptable, PYTHON-1026', 'row', '=', 'next', '(', 'iter', '(', 'self', '.', '_current_rows', ')', ')', 'return', 'row'] | Return a single row of the results or None if empty. This is basically
a shortcut to `result_set.current_rows[0]` and should only be used when
you know a query returns a single row. Consider using an iterator if the
ResultSet contains more than one row. | ['Return', 'a', 'single', 'row', 'of', 'the', 'results', 'or', 'None', 'if', 'empty', '.', 'This', 'is', 'basically', 'a', 'shortcut', 'to', 'result_set', '.', 'current_rows', '[', '0', ']', 'and', 'should', 'only', 'be', 'used', 'when', 'you', 'know', 'a', 'query', 'returns', 'a', 'single', 'row', '.', 'Consider', 'using', 'an', 'iterator', 'if', 'the', 'ResultSet', 'contains', 'more', 'than', 'one', 'row', '.'] | train | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L4386-L4400 |
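A hedged usage sketch for ResultSet.one() above; the contact point, keyspace, table and id are placeholders, not part of the record.
from cassandra.cluster import Cluster
cluster = Cluster(["127.0.0.1"])               # placeholder contact point
session = cluster.connect("my_keyspace")       # placeholder keyspace
result = session.execute("SELECT name FROM users WHERE id = %s LIMIT 1", [42])
row = result.one()        # a single Row, or None if the query matched nothing
if row is not None:
    print(row.name)
cluster.shutdown()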
8,648 | drj11/pypng | code/mkiccp.py | black | def black(m):
"""Return a function that maps all values from [0.0,m] to 0, and maps
the range [m,1.0] into [0.0, 1.0] linearly.
"""
m = float(m)
def f(x):
if x <= m:
return 0.0
return (x - m) / (1.0 - m)
return f | python | def black(m):
"""Return a function that maps all values from [0.0,m] to 0, and maps
the range [m,1.0] into [0.0, 1.0] linearly.
"""
m = float(m)
def f(x):
if x <= m:
return 0.0
return (x - m) / (1.0 - m)
return f | ['def', 'black', '(', 'm', ')', ':', 'm', '=', 'float', '(', 'm', ')', 'def', 'f', '(', 'x', ')', ':', 'if', 'x', '<=', 'm', ':', 'return', '0.0', 'return', '(', 'x', '-', 'm', ')', '/', '(', '1.0', '-', 'm', ')', 'return', 'f'] | Return a function that maps all values from [0.0,m] to 0, and maps
the range [m,1.0] into [0.0, 1.0] linearly. | ['Return', 'a', 'function', 'that', 'maps', 'all', 'values', 'from', '[', '0', '.', '0', 'm', ']', 'to', '0', 'and', 'maps', 'the', 'range', '[', 'm', '1', '.', '0', ']', 'into', '[', '0', '.', '0', '1', '.', '0', ']', 'linearly', '.'] | train | https://github.com/drj11/pypng/blob/b8220ca9f58e4c5bc1d507e713744fcb8c049225/code/mkiccp.py#L15-L26 |
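Because black() is a pure function it is easy to check by hand; the sketch below restates the helper so it runs standalone, and the expected values follow directly from the formula in the record (anything at or below m maps to 0.0, the rest is rescaled linearly).
def black(m):
    # Same mapping as in the record above, restated for a self-contained check.
    m = float(m)
    def f(x):
        if x <= m:
            return 0.0
        return (x - m) / (1.0 - m)
    return f
f = black(0.5)
print(f(0.25))   # 0.0  (below the cutoff)
print(f(0.5))    # 0.0  (exactly at the cutoff)
print(f(0.75))   # 0.5  ((0.75 - 0.5) / (1.0 - 0.5))
print(f(1.0))    # 1.0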
8,649 | razor-x/scipy-data_fitting | scipy_data_fitting/plot.py | Plot.figure | def figure(self):
"""
The [`matplotlib.pyplot.figure`][1] instance.
[1]: http://matplotlib.org/api/figure_api.html#matplotlib.figure.Figure
"""
if not hasattr(self, '_figure'): self._figure = matplotlib.pyplot.figure()
return self._figure | python | def figure(self):
"""
The [`matplotlib.pyplot.figure`][1] instance.
[1]: http://matplotlib.org/api/figure_api.html#matplotlib.figure.Figure
"""
if not hasattr(self, '_figure'): self._figure = matplotlib.pyplot.figure()
return self._figure | ['def', 'figure', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'_figure'", ')', ':', 'self', '.', '_figure', '=', 'matplotlib', '.', 'pyplot', '.', 'figure', '(', ')', 'return', 'self', '.', '_figure'] | The [`matplotlib.pyplot.figure`][1] instance.
[1]: http://matplotlib.org/api/figure_api.html#matplotlib.figure.Figure | ['The', '[', 'matplotlib', '.', 'pyplot', '.', 'figure', ']', '[', '1', ']', 'instance', '.'] | train | https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/plot.py#L75-L82 |
8,650 | YosaiProject/yosai | yosai/web/subject/subject.py | WebYosai.requires_role | def requires_role(role_s, logical_operator=all):
"""
Requires that the calling Subject be authorized to the extent that is
required to satisfy the role_s specified and the logical operation
upon them.
:param role_s: a collection of the role(s) required, specified by
identifiers (such as a role name)
:type role_s: a List of Strings
:param logical_operator: indicates whether all or at least one permission
is true (and, any)
:type: and OR all (from python standard library)
Elaborate Example:
requires_role(role_s=['sysadmin', 'developer'], logical_operator=any)
Basic Example:
requires_role('physician')
"""
def outer_wrap(fn):
@functools.wraps(fn)
def inner_wrap(*args, **kwargs):
subject = WebYosai.get_current_subject()
try:
subject.check_role(role_s, logical_operator)
except ValueError:
msg = ("Attempting to perform a user-only operation. The "
"current Subject is NOT a user (they haven't been "
"authenticated or remembered from a previous login). "
"ACCESS DENIED.")
raise WebYosai.get_current_webregistry().raise_unauthorized(msg)
except AuthorizationException:
msg = "Access Denied. Insufficient Role Membership."
raise WebYosai.get_current_webregistry().raise_forbidden(msg)
return fn(*args, **kwargs)
return inner_wrap
return outer_wrap | python | def requires_role(role_s, logical_operator=all):
"""
Requires that the calling Subject be authorized to the extent that is
required to satisfy the role_s specified and the logical operation
upon them.
:param role_s: a collection of the role(s) required, specified by
identifiers (such as a role name)
:type role_s: a List of Strings
:param logical_operator: indicates whether all or at least one permission
is true (and, any)
:type: and OR all (from python standard library)
Elaborate Example:
requires_role(role_s=['sysadmin', 'developer'], logical_operator=any)
Basic Example:
requires_role('physician')
"""
def outer_wrap(fn):
@functools.wraps(fn)
def inner_wrap(*args, **kwargs):
subject = WebYosai.get_current_subject()
try:
subject.check_role(role_s, logical_operator)
except ValueError:
msg = ("Attempting to perform a user-only operation. The "
"current Subject is NOT a user (they haven't been "
"authenticated or remembered from a previous login). "
"ACCESS DENIED.")
raise WebYosai.get_current_webregistry().raise_unauthorized(msg)
except AuthorizationException:
msg = "Access Denied. Insufficient Role Membership."
raise WebYosai.get_current_webregistry().raise_forbidden(msg)
return fn(*args, **kwargs)
return inner_wrap
return outer_wrap | ['def', 'requires_role', '(', 'role_s', ',', 'logical_operator', '=', 'all', ')', ':', 'def', 'outer_wrap', '(', 'fn', ')', ':', '@', 'functools', '.', 'wraps', '(', 'fn', ')', 'def', 'inner_wrap', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'subject', '=', 'WebYosai', '.', 'get_current_subject', '(', ')', 'try', ':', 'subject', '.', 'check_role', '(', 'role_s', ',', 'logical_operator', ')', 'except', 'ValueError', ':', 'msg', '=', '(', '"Attempting to perform a user-only operation. The "', '"current Subject is NOT a user (they haven\'t been "', '"authenticated or remembered from a previous login). "', '"ACCESS DENIED."', ')', 'raise', 'WebYosai', '.', 'get_current_webregistry', '(', ')', '.', 'raise_unauthorized', '(', 'msg', ')', 'except', 'AuthorizationException', ':', 'msg', '=', '"Access Denied. Insufficient Role Membership."', 'raise', 'WebYosai', '.', 'get_current_webregistry', '(', ')', '.', 'raise_forbidden', '(', 'msg', ')', 'return', 'fn', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'inner_wrap', 'return', 'outer_wrap'] | Requires that the calling Subject be authorized to the extent that is
required to satisfy the role_s specified and the logical operation
upon them.
:param role_s: a collection of the role(s) required, specified by
identifiers (such as a role name)
:type role_s: a List of Strings
:param logical_operator: indicates whether all or at least one permission
is true (and, any)
:type: and OR all (from python standard library)
Elaborate Example:
requires_role(role_s=['sysadmin', 'developer'], logical_operator=any)
Basic Example:
requires_role('physician') | ['Requires', 'that', 'the', 'calling', 'Subject', 'be', 'authorized', 'to', 'the', 'extent', 'that', 'is', 'required', 'to', 'satisfy', 'the', 'role_s', 'specified', 'and', 'the', 'logical', 'operation', 'upon', 'them', '.'] | train | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/web/subject/subject.py#L395-L437 |
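A decorator-usage sketch for WebYosai.requires_role above. The view functions and role names are invented, and the import path is the conventional one for Yosai's web integration (an assumption, not something stated in the record); the decorated views still need a configured Yosai environment at call time.
from yosai.web import WebYosai
@WebYosai.requires_role(['physician'])                      # every listed role required
def view_patient_chart(request, patient_id):
    ...
@WebYosai.requires_role(['sysadmin', 'developer'], logical_operator=any)  # any one role suffices
def restart_background_worker(request):
    ...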
8,651 | saltstack/salt | salt/modules/lxc.py | _ensure_running | def _ensure_running(name, no_start=False, path=None):
'''
If the container is not currently running, start it. This function returns
the state that the container was in before changing
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
'''
_ensure_exists(name, path=path)
pre = state(name, path=path)
if pre == 'running':
# This will be a no-op but running the function will give us a pretty
# return dict.
return start(name, path=path)
elif pre == 'stopped':
if no_start:
raise CommandExecutionError(
'Container \'{0}\' is not running'.format(name)
)
return start(name, path=path)
elif pre == 'frozen':
if no_start:
raise CommandExecutionError(
'Container \'{0}\' is not running'.format(name)
)
return unfreeze(name, path=path) | python | def _ensure_running(name, no_start=False, path=None):
'''
If the container is not currently running, start it. This function returns
the state that the container was in before changing
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
'''
_ensure_exists(name, path=path)
pre = state(name, path=path)
if pre == 'running':
# This will be a no-op but running the function will give us a pretty
# return dict.
return start(name, path=path)
elif pre == 'stopped':
if no_start:
raise CommandExecutionError(
'Container \'{0}\' is not running'.format(name)
)
return start(name, path=path)
elif pre == 'frozen':
if no_start:
raise CommandExecutionError(
'Container \'{0}\' is not running'.format(name)
)
return unfreeze(name, path=path) | ['def', '_ensure_running', '(', 'name', ',', 'no_start', '=', 'False', ',', 'path', '=', 'None', ')', ':', '_ensure_exists', '(', 'name', ',', 'path', '=', 'path', ')', 'pre', '=', 'state', '(', 'name', ',', 'path', '=', 'path', ')', 'if', 'pre', '==', "'running'", ':', '# This will be a no-op but running the function will give us a pretty', '# return dict.', 'return', 'start', '(', 'name', ',', 'path', '=', 'path', ')', 'elif', 'pre', '==', "'stopped'", ':', 'if', 'no_start', ':', 'raise', 'CommandExecutionError', '(', "'Container \\'{0}\\' is not running'", '.', 'format', '(', 'name', ')', ')', 'return', 'start', '(', 'name', ',', 'path', '=', 'path', ')', 'elif', 'pre', '==', "'frozen'", ':', 'if', 'no_start', ':', 'raise', 'CommandExecutionError', '(', "'Container \\'{0}\\' is not running'", '.', 'format', '(', 'name', ')', ')', 'return', 'unfreeze', '(', 'name', ',', 'path', '=', 'path', ')'] | If the container is not currently running, start it. This function returns
the state that the container was in before changing
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0 | ['If', 'the', 'container', 'is', 'not', 'currently', 'running', 'start', 'it', '.', 'This', 'function', 'returns', 'the', 'state', 'that', 'the', 'container', 'was', 'in', 'before', 'changing'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L2312-L2340 |
8,652 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/certificates/certificates.py | CertificatesAPI.update_certificate | def update_certificate(self, certificate_id, **kwargs):
"""Update a certificate.
:param str certificate_id: The certificate id (Required)
:param str certificate_data: X509.v3 trusted certificate in PEM format.
:param str signature: This parameter has been DEPRECATED in the API and does not need to
be provided.
:param str type: type of the certificate. Values: lwm2m or bootstrap.
:param str status: Status of the certificate.
Allowed values: "ACTIVE" | "INACTIVE".
:param str description: Human readable description of this certificate,
not longer than 500 characters.
:returns: Certificate object
:rtype: Certificate
"""
api = self._get_api(iam.DeveloperApi)
cert = Certificate._create_request_map(kwargs)
body = iam.TrustedCertificateReq(**cert)
certificate = Certificate(api.update_certificate(certificate_id, body))
return self.get_certificate(certificate.id) | python | def update_certificate(self, certificate_id, **kwargs):
"""Update a certificate.
:param str certificate_id: The certificate id (Required)
:param str certificate_data: X509.v3 trusted certificate in PEM format.
:param str signature: This parameter has been DEPRECATED in the API and does not need to
be provided.
:param str type: type of the certificate. Values: lwm2m or bootstrap.
:param str status: Status of the certificate.
Allowed values: "ACTIVE" | "INACTIVE".
:param str description: Human readable description of this certificate,
not longer than 500 characters.
:returns: Certificate object
:rtype: Certificate
"""
api = self._get_api(iam.DeveloperApi)
cert = Certificate._create_request_map(kwargs)
body = iam.TrustedCertificateReq(**cert)
certificate = Certificate(api.update_certificate(certificate_id, body))
return self.get_certificate(certificate.id) | ['def', 'update_certificate', '(', 'self', ',', 'certificate_id', ',', '*', '*', 'kwargs', ')', ':', 'api', '=', 'self', '.', '_get_api', '(', 'iam', '.', 'DeveloperApi', ')', 'cert', '=', 'Certificate', '.', '_create_request_map', '(', 'kwargs', ')', 'body', '=', 'iam', '.', 'TrustedCertificateReq', '(', '*', '*', 'cert', ')', 'certificate', '=', 'Certificate', '(', 'api', '.', 'update_certificate', '(', 'certificate_id', ',', 'body', ')', ')', 'return', 'self', '.', 'get_certificate', '(', 'certificate', '.', 'id', ')'] | Update a certificate.
:param str certificate_id: The certificate id (Required)
:param str certificate_data: X509.v3 trusted certificate in PEM format.
:param str signature: This parameter has been DEPRECATED in the API and does not need to
be provided.
:param str type: type of the certificate. Values: lwm2m or bootstrap.
:param str status: Status of the certificate.
Allowed values: "ACTIVE" | "INACTIVE".
:param str description: Human readable description of this certificate,
not longer than 500 characters.
:returns: Certificate object
:rtype: Certificate | ['Update', 'a', 'certificate', '.'] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/certificates/certificates.py#L176-L195 |
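A hedged sketch of calling update_certificate above through the SDK; the certificate id and field values are placeholders, the top-level import is assumed to re-export CertificatesAPI, and credentials are assumed to come from the SDK's normal configuration.
from mbed_cloud import CertificatesAPI
api = CertificatesAPI()   # API key picked up from the usual SDK configuration
updated = api.update_certificate(
    "015c64f76a73a5a752f3f85700000000",   # placeholder certificate id
    status="INACTIVE",
    description="Rotated out of service",
)
print(updated.status)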
8,653 | radzak/rtv-downloader | rtv/utils.py | clean_title | def clean_title(title):
"""
Clean title -> remove dates, remove duplicated spaces and strip title.
Args:
title (str): Title.
Returns:
str: Clean title without dates, duplicated, trailing and leading spaces.
"""
date_pattern = re.compile(r'\W*'
r'\d{1,2}'
r'[/\-.]'
r'\d{1,2}'
r'[/\-.]'
r'(?=\d*)(?:.{4}|.{2})'
r'\W*')
title = date_pattern.sub(' ', title)
title = re.sub(r'\s{2,}', ' ', title)
title = title.strip()
return title | python | def clean_title(title):
"""
Clean title -> remove dates, remove duplicated spaces and strip title.
Args:
title (str): Title.
Returns:
str: Clean title without dates, duplicated, trailing and leading spaces.
"""
date_pattern = re.compile(r'\W*'
r'\d{1,2}'
r'[/\-.]'
r'\d{1,2}'
r'[/\-.]'
r'(?=\d*)(?:.{4}|.{2})'
r'\W*')
title = date_pattern.sub(' ', title)
title = re.sub(r'\s{2,}', ' ', title)
title = title.strip()
return title | ['def', 'clean_title', '(', 'title', ')', ':', 'date_pattern', '=', 're', '.', 'compile', '(', "r'\\W*'", "r'\\d{1,2}'", "r'[/\\-.]'", "r'\\d{1,2}'", "r'[/\\-.]'", "r'(?=\\d*)(?:.{4}|.{2})'", "r'\\W*'", ')', 'title', '=', 'date_pattern', '.', 'sub', '(', "' '", ',', 'title', ')', 'title', '=', 're', '.', 'sub', '(', "r'\\s{2,}'", ',', "' '", ',', 'title', ')', 'title', '=', 'title', '.', 'strip', '(', ')', 'return', 'title'] | Clean title -> remove dates, remove duplicated spaces and strip title.
Args:
title (str): Title.
Returns:
str: Clean title without dates, duplicated, trailing and leading spaces. | ['Clean', 'title', '-', '>', 'remove', 'dates', 'remove', 'duplicated', 'spaces', 'and', 'strip', 'title', '.'] | train | https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/utils.py#L122-L143 |
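The date-stripping behaviour of clean_title is easy to exercise directly; the sample titles are invented, the expected outputs follow from the regex in the record, and the import path mirrors the record's rtv/utils.py (assuming the package is importable).
from rtv.utils import clean_title
print(clean_title("Wiadomosci  21.04.2018  wydanie glowne"))
# -> "Wiadomosci wydanie glowne"   (date removed, doubled spaces collapsed)
print(clean_title("  Evening news 01/02/19 "))
# -> "Evening news"                (two-digit year form matches as well)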
8,654 | Datary/scrapbag | scrapbag/csvs.py | row_csv_limiter | def row_csv_limiter(rows, limits=None):
"""
Limit row passing a value or detect limits making the best effort.
"""
limits = [None, None] if limits is None else limits
if len(exclude_empty_values(limits)) == 2:
upper_limit = limits[0]
lower_limit = limits[1]
elif len(exclude_empty_values(limits)) == 1:
upper_limit = limits[0]
lower_limit = row_iter_limiter(rows, 1, -1, 1)
else:
upper_limit = row_iter_limiter(rows, 0, 1, 0)
lower_limit = row_iter_limiter(rows, 1, -1, 1)
return rows[upper_limit: lower_limit] | python | def row_csv_limiter(rows, limits=None):
"""
Limit row passing a value or detect limits making the best effort.
"""
limits = [None, None] if limits is None else limits
if len(exclude_empty_values(limits)) == 2:
upper_limit = limits[0]
lower_limit = limits[1]
elif len(exclude_empty_values(limits)) == 1:
upper_limit = limits[0]
lower_limit = row_iter_limiter(rows, 1, -1, 1)
else:
upper_limit = row_iter_limiter(rows, 0, 1, 0)
lower_limit = row_iter_limiter(rows, 1, -1, 1)
return rows[upper_limit: lower_limit] | ['def', 'row_csv_limiter', '(', 'rows', ',', 'limits', '=', 'None', ')', ':', 'limits', '=', '[', 'None', ',', 'None', ']', 'if', 'limits', 'is', 'None', 'else', 'limits', 'if', 'len', '(', 'exclude_empty_values', '(', 'limits', ')', ')', '==', '2', ':', 'upper_limit', '=', 'limits', '[', '0', ']', 'lower_limit', '=', 'limits', '[', '1', ']', 'elif', 'len', '(', 'exclude_empty_values', '(', 'limits', ')', ')', '==', '1', ':', 'upper_limit', '=', 'limits', '[', '0', ']', 'lower_limit', '=', 'row_iter_limiter', '(', 'rows', ',', '1', ',', '-', '1', ',', '1', ')', 'else', ':', 'upper_limit', '=', 'row_iter_limiter', '(', 'rows', ',', '0', ',', '1', ',', '0', ')', 'lower_limit', '=', 'row_iter_limiter', '(', 'rows', ',', '1', ',', '-', '1', ',', '1', ')', 'return', 'rows', '[', 'upper_limit', ':', 'lower_limit', ']'] | Limit row passing a value or detect limits making the best effort. | ['Limit', 'row', 'passing', 'a', 'value', 'or', 'detect', 'limits', 'making', 'the', 'best', 'effort', '.'] | train | https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L244-L261 |
8,655 | quantmind/pulsar | pulsar/utils/pylib/websocket.py | FrameParser.close | def close(self, code=None):
'''return a `close` :class:`Frame`.
'''
code = code or 1000
body = pack('!H', code)
body += self._close_codes.get(code, '').encode('utf-8')
return self.encode(body, opcode=0x8) | python | def close(self, code=None):
'''return a `close` :class:`Frame`.
'''
code = code or 1000
body = pack('!H', code)
body += self._close_codes.get(code, '').encode('utf-8')
return self.encode(body, opcode=0x8) | ['def', 'close', '(', 'self', ',', 'code', '=', 'None', ')', ':', 'code', '=', 'code', 'or', '1000', 'body', '=', 'pack', '(', "'!H'", ',', 'code', ')', 'body', '+=', 'self', '.', '_close_codes', '.', 'get', '(', 'code', ',', "''", ')', '.', 'encode', '(', "'utf-8'", ')', 'return', 'self', '.', 'encode', '(', 'body', ',', 'opcode', '=', '0x8', ')'] | return a `close` :class:`Frame`. | ['return', 'a', 'close', ':', 'class', ':', 'Frame', '.'] | train | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/websocket.py#L128-L134 |
8,656 | eldarion/formly | formly/views/design.py | survey_change_name | def survey_change_name(request, pk):
"""
Works well with:
http://www.appelsiini.net/projects/jeditable
"""
survey = get_object_or_404(Survey, pk=pk)
if not request.user.has_perm("formly.change_survey_name", obj=survey):
raise PermissionDenied()
survey.name = request.POST.get("name")
survey.save()
return JsonResponse({
"status": "OK",
"name": survey.name
}) | python | def survey_change_name(request, pk):
"""
Works well with:
http://www.appelsiini.net/projects/jeditable
"""
survey = get_object_or_404(Survey, pk=pk)
if not request.user.has_perm("formly.change_survey_name", obj=survey):
raise PermissionDenied()
survey.name = request.POST.get("name")
survey.save()
return JsonResponse({
"status": "OK",
"name": survey.name
}) | ['def', 'survey_change_name', '(', 'request', ',', 'pk', ')', ':', 'survey', '=', 'get_object_or_404', '(', 'Survey', ',', 'pk', '=', 'pk', ')', 'if', 'not', 'request', '.', 'user', '.', 'has_perm', '(', '"formly.change_survey_name"', ',', 'obj', '=', 'survey', ')', ':', 'raise', 'PermissionDenied', '(', ')', 'survey', '.', 'name', '=', 'request', '.', 'POST', '.', 'get', '(', '"name"', ')', 'survey', '.', 'save', '(', ')', 'return', 'JsonResponse', '(', '{', '"status"', ':', '"OK"', ',', '"name"', ':', 'survey', '.', 'name', '}', ')'] | Works well with:
http://www.appelsiini.net/projects/jeditable | ['Works', 'well', 'with', ':', 'http', ':', '//', 'www', '.', 'appelsiini', '.', 'net', '/', 'projects', '/', 'jeditable'] | train | https://github.com/eldarion/formly/blob/05dfefbbf93ded48e560a6bacf1fc77487a0a4e3/formly/views/design.py#L99-L114 |
8,657 | openfisca/openfisca-web-api | openfisca_web_api/controllers/__init__.py | make_router | def make_router():
"""Return a WSGI application that searches requests to controllers """
global router
routings = [
('GET', '^/$', index),
('GET', '^/api/?$', index),
('POST', '^/api/1/calculate/?$', calculate.api1_calculate),
('GET', '^/api/2/entities/?$', entities.api2_entities),
('GET', '^/api/1/field/?$', field.api1_field),
('GET', '^/api/1/formula/(?P<name>[^/]+)/?$', formula.api1_formula),
('GET', '^/api/2/formula/(?:(?P<period>[A-Za-z0-9:-]*)/)?(?P<names>[A-Za-z0-9_+-]+)/?$', formula.api2_formula),
('GET', '^/api/1/parameters/?$', parameters.api1_parameters),
('GET', '^/api/1/reforms/?$', reforms.api1_reforms),
('POST', '^/api/1/simulate/?$', simulate.api1_simulate),
('GET', '^/api/1/swagger$', swagger.api1_swagger),
('GET', '^/api/1/variables/?$', variables.api1_variables),
]
router = urls.make_router(*routings)
return router | python | def make_router():
"""Return a WSGI application that searches requests to controllers """
global router
routings = [
('GET', '^/$', index),
('GET', '^/api/?$', index),
('POST', '^/api/1/calculate/?$', calculate.api1_calculate),
('GET', '^/api/2/entities/?$', entities.api2_entities),
('GET', '^/api/1/field/?$', field.api1_field),
('GET', '^/api/1/formula/(?P<name>[^/]+)/?$', formula.api1_formula),
('GET', '^/api/2/formula/(?:(?P<period>[A-Za-z0-9:-]*)/)?(?P<names>[A-Za-z0-9_+-]+)/?$', formula.api2_formula),
('GET', '^/api/1/parameters/?$', parameters.api1_parameters),
('GET', '^/api/1/reforms/?$', reforms.api1_reforms),
('POST', '^/api/1/simulate/?$', simulate.api1_simulate),
('GET', '^/api/1/swagger$', swagger.api1_swagger),
('GET', '^/api/1/variables/?$', variables.api1_variables),
]
router = urls.make_router(*routings)
return router | ['def', 'make_router', '(', ')', ':', 'global', 'router', 'routings', '=', '[', '(', "'GET'", ',', "'^/$'", ',', 'index', ')', ',', '(', "'GET'", ',', "'^/api/?$'", ',', 'index', ')', ',', '(', "'POST'", ',', "'^/api/1/calculate/?$'", ',', 'calculate', '.', 'api1_calculate', ')', ',', '(', "'GET'", ',', "'^/api/2/entities/?$'", ',', 'entities', '.', 'api2_entities', ')', ',', '(', "'GET'", ',', "'^/api/1/field/?$'", ',', 'field', '.', 'api1_field', ')', ',', '(', "'GET'", ',', "'^/api/1/formula/(?P<name>[^/]+)/?$'", ',', 'formula', '.', 'api1_formula', ')', ',', '(', "'GET'", ',', "'^/api/2/formula/(?:(?P<period>[A-Za-z0-9:-]*)/)?(?P<names>[A-Za-z0-9_+-]+)/?$'", ',', 'formula', '.', 'api2_formula', ')', ',', '(', "'GET'", ',', "'^/api/1/parameters/?$'", ',', 'parameters', '.', 'api1_parameters', ')', ',', '(', "'GET'", ',', "'^/api/1/reforms/?$'", ',', 'reforms', '.', 'api1_reforms', ')', ',', '(', "'POST'", ',', "'^/api/1/simulate/?$'", ',', 'simulate', '.', 'api1_simulate', ')', ',', '(', "'GET'", ',', "'^/api/1/swagger$'", ',', 'swagger', '.', 'api1_swagger', ')', ',', '(', "'GET'", ',', "'^/api/1/variables/?$'", ',', 'variables', '.', 'api1_variables', ')', ',', ']', 'router', '=', 'urls', '.', 'make_router', '(', '*', 'routings', ')', 'return', 'router'] | Return a WSGI application that searches requests to controllers | ['Return', 'a', 'WSGI', 'application', 'that', 'searches', 'requests', 'to', 'controllers'] | train | https://github.com/openfisca/openfisca-web-api/blob/d1cd3bfacac338e80bb0df7e0465b65649dd893b/openfisca_web_api/controllers/__init__.py#L31-L49 |
8,658 | druids/django-chamber | chamber/utils/__init__.py | get_class_method | def get_class_method(cls_or_inst, method_name):
"""
Returns a method from a given class or instance. When the method does not exist, it returns `None`. Also works with
properties and cached properties.
"""
cls = cls_or_inst if isinstance(cls_or_inst, type) else cls_or_inst.__class__
meth = getattr(cls, method_name, None)
if isinstance(meth, property):
meth = meth.fget
elif isinstance(meth, cached_property):
meth = meth.func
return meth | python | def get_class_method(cls_or_inst, method_name):
"""
Returns a method from a given class or instance. When the method does not exist, it returns `None`. Also works with
properties and cached properties.
"""
cls = cls_or_inst if isinstance(cls_or_inst, type) else cls_or_inst.__class__
meth = getattr(cls, method_name, None)
if isinstance(meth, property):
meth = meth.fget
elif isinstance(meth, cached_property):
meth = meth.func
return meth | ['def', 'get_class_method', '(', 'cls_or_inst', ',', 'method_name', ')', ':', 'cls', '=', 'cls_or_inst', 'if', 'isinstance', '(', 'cls_or_inst', ',', 'type', ')', 'else', 'cls_or_inst', '.', '__class__', 'meth', '=', 'getattr', '(', 'cls', ',', 'method_name', ',', 'None', ')', 'if', 'isinstance', '(', 'meth', ',', 'property', ')', ':', 'meth', '=', 'meth', '.', 'fget', 'elif', 'isinstance', '(', 'meth', ',', 'cached_property', ')', ':', 'meth', '=', 'meth', '.', 'func', 'return', 'meth'] | Returns a method from a given class or instance. When the method doest not exist, it returns `None`. Also works with
properties and cached properties. | ['Returns', 'a', 'method', 'from', 'a', 'given', 'class', 'or', 'instance', '.', 'When', 'the', 'method', 'doest', 'not', 'exist', 'it', 'returns', 'None', '.', 'Also', 'works', 'with', 'properties', 'and', 'cached', 'properties', '.'] | train | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/__init__.py#L17-L28 |
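A small demonstration of the lookup behaviour described above. The Article class is invented; get_class_method is imported from the record's chamber/utils/__init__.py, and Django's cached_property is used because that is the type the helper checks for (importing it does not require configured settings).
from django.utils.functional import cached_property
from chamber.utils import get_class_method
class Article:
    def render(self):
        return "<p>...</p>"
    @property
    def slug(self):
        return "article-slug"
    @cached_property
    def word_count(self):
        return 123
print(get_class_method(Article, "render"))      # the plain function behind the method
print(get_class_method(Article, "slug"))        # property -> its fget function
print(get_class_method(Article, "word_count"))  # cached_property -> its wrapped func
print(get_class_method(Article(), "missing"))   # None; an instance works as well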
8,659 | resync/resync | resync/capability_list.py | CapabilityList.capability_info | def capability_info(self, name=None):
"""Return information about the requested capability from this list.
Will return None if there is no information about the requested capability.
"""
for r in self.resources:
if (r.capability == name):
return(r)
return(None) | python | def capability_info(self, name=None):
"""Return information about the requested capability from this list.
Will return None if there is no information about the requested capability.
"""
for r in self.resources:
if (r.capability == name):
return(r)
return(None) | ['def', 'capability_info', '(', 'self', ',', 'name', '=', 'None', ')', ':', 'for', 'r', 'in', 'self', '.', 'resources', ':', 'if', '(', 'r', '.', 'capability', '==', 'name', ')', ':', 'return', '(', 'r', ')', 'return', '(', 'None', ')'] | Return information about the requested capability from this list.
Will return None if there is no information about the requested capability. | ['Return', 'information', 'about', 'the', 'requested', 'capability', 'from', 'this', 'list', '.'] | train | https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/capability_list.py#L117-L125 |
8,660 | OnroerendErfgoed/oe_utils | oe_utils/validation/validators_actor.py | TelefoonSchemaNode.preparer | def preparer(telefoon):
'''
Edit a phone value to a value that can be validated as a
phone number.
This takes the incoming value and :
Removes all whitespace ( space, tab , newline , ... ) characters
Removes the following characters: " / - . "
If no + is present at frond, add the country code
In short: just add a + at the beginning of the country code.
'''
if telefoon is None or telefoon == colander.null:
return colander.null
if 'landcode' in telefoon and telefoon.get('landcode') is not None:
landcode = telefoon.get('landcode')
value = re.sub(r'\s+', '', landcode).replace('.', '').replace('/', '').replace(',', '').replace('-', ''). \
lstrip('0')
telefoon['landcode'] = '+' + value if value[0] != '+' else value
if 'nummer' in telefoon and telefoon.get('nummer') is not None:
nummer = telefoon.get('nummer')
value = re.sub(r'\s+', '', nummer).replace('.', '').replace('/', '').replace(',', '').replace('-', ''). \
lstrip('0')
telefoon['nummer'] = value
return telefoon | python | def preparer(telefoon):
'''
Edit a phone value to a value that can be validated as a
phone number.
This takes the incoming value and :
Removes all whitespace ( space, tab , newline , ... ) characters
Removes the following characters: " / - . "
If no + is present at frond, add the country code
In short: just add a + at the beginning of the country code.
'''
if telefoon is None or telefoon == colander.null:
return colander.null
if 'landcode' in telefoon and telefoon.get('landcode') is not None:
landcode = telefoon.get('landcode')
value = re.sub(r'\s+', '', landcode).replace('.', '').replace('/', '').replace(',', '').replace('-', ''). \
lstrip('0')
telefoon['landcode'] = '+' + value if value[0] != '+' else value
if 'nummer' in telefoon and telefoon.get('nummer') is not None:
nummer = telefoon.get('nummer')
value = re.sub(r'\s+', '', nummer).replace('.', '').replace('/', '').replace(',', '').replace('-', ''). \
lstrip('0')
telefoon['nummer'] = value
return telefoon | ['def', 'preparer', '(', 'telefoon', ')', ':', 'if', 'telefoon', 'is', 'None', 'or', 'telefoon', '==', 'colander', '.', 'null', ':', 'return', 'colander', '.', 'null', 'if', "'landcode'", 'in', 'telefoon', 'and', 'telefoon', '.', 'get', '(', "'landcode'", ')', 'is', 'not', 'None', ':', 'landcode', '=', 'telefoon', '.', 'get', '(', "'landcode'", ')', 'value', '=', 're', '.', 'sub', '(', "r'\\s+'", ',', "''", ',', 'landcode', ')', '.', 'replace', '(', "'.'", ',', "''", ')', '.', 'replace', '(', "'/'", ',', "''", ')', '.', 'replace', '(', "','", ',', "''", ')', '.', 'replace', '(', "'-'", ',', "''", ')', '.', 'lstrip', '(', "'0'", ')', 'telefoon', '[', "'landcode'", ']', '=', "'+'", '+', 'value', 'if', 'value', '[', '0', ']', '!=', "'+'", 'else', 'value', 'if', "'nummer'", 'in', 'telefoon', 'and', 'telefoon', '.', 'get', '(', "'nummer'", ')', 'is', 'not', 'None', ':', 'nummer', '=', 'telefoon', '.', 'get', '(', "'nummer'", ')', 'value', '=', 're', '.', 'sub', '(', "r'\\s+'", ',', "''", ',', 'nummer', ')', '.', 'replace', '(', "'.'", ',', "''", ')', '.', 'replace', '(', "'/'", ',', "''", ')', '.', 'replace', '(', "','", ',', "''", ')', '.', 'replace', '(', "'-'", ',', "''", ')', '.', 'lstrip', '(', "'0'", ')', 'telefoon', '[', "'nummer'", ']', '=', 'value', 'return', 'telefoon'] | Edit a phone value to a value that can be validated as a
phone number.
This takes the incoming value and :
Removes all whitespace ( space, tab , newline , ... ) characters
Removes the following characters: " / - . "
If no + is present at front, add the country code
In short: just add a + at the beginning of the country code. | ['Edit', 'a', 'phone', 'value', 'to', 'a', 'value', 'that', 'can', 'be', 'validated', 'as', 'a', 'phone', 'number', '.', 'This', 'takes', 'the', 'incoming', 'value', 'and', ':', 'Removes', 'all', 'whitespace', '(', 'space', 'tab', 'newline', '...', ')', 'characters', 'Removes', 'the', 'following', 'characters', ':', '/', '-', '.', 'If', 'no', '+', 'is', 'present', 'at', 'frond', 'add', 'the', 'country', 'code', 'In', 'short', ':', 'just', 'add', 'a', '+', 'at', 'the', 'beginning', 'of', 'the', 'country', 'code', '.'] | train | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/validation/validators_actor.py#L96-L118 |
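The normalisation above can be traced by hand: strip whitespace and the . / , - characters, strip leading zeros, and prefix the country code with +. The sample number is invented and the import path mirrors the record (treat both as assumptions).
from oe_utils.validation.validators_actor import TelefoonSchemaNode
telefoon = {"landcode": "0032", "nummer": "02 / 553.16.50"}
print(TelefoonSchemaNode.preparer(telefoon))
# -> {'landcode': '+32', 'nummer': '25531650'}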
8,661 | myusuf3/delorean | delorean/dates.py | move_datetime_year | def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta) | python | def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta) | ['def', 'move_datetime_year', '(', 'dt', ',', 'direction', ',', 'num_shifts', ')', ':', 'delta', '=', 'relativedelta', '(', 'years', '=', '+', 'num_shifts', ')', 'return', '_move_datetime', '(', 'dt', ',', 'direction', ',', 'delta', ')'] | Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case | ['Move', 'datetime', '1', 'year', 'in', 'the', 'chosen', 'direction', '.', 'unit', 'is', 'a', 'no', '-', 'op', 'to', 'keep', 'the', 'API', 'the', 'same', 'as', 'the', 'day', 'case'] | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L113-L119 |
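The year shift above is plain dateutil arithmetic wrapped by _move_datetime; a self-contained sketch of that arithmetic, without delorean, looks like this.
from datetime import datetime
from dateutil.relativedelta import relativedelta
dt = datetime(2016, 2, 29, 12, 0)
print(dt + relativedelta(years=+1))   # 2017-02-28 12:00:00 (day clamped to month end)
print(dt - relativedelta(years=+4))   # 2012-02-29 12:00:00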
8,662 | roclark/sportsreference | sportsreference/nhl/schedule.py | Schedule.dataframe | def dataframe(self):
"""
Returns a pandas DataFrame where each row is a representation of the
Game class. Rows are indexed by the boxscore string.
"""
frames = []
for game in self.__iter__():
df = game.dataframe
if df is not None:
frames.append(df)
if frames == []:
return None
return pd.concat(frames) | python | def dataframe(self):
"""
Returns a pandas DataFrame where each row is a representation of the
Game class. Rows are indexed by the boxscore string.
"""
frames = []
for game in self.__iter__():
df = game.dataframe
if df is not None:
frames.append(df)
if frames == []:
return None
return pd.concat(frames) | ['def', 'dataframe', '(', 'self', ')', ':', 'frames', '=', '[', ']', 'for', 'game', 'in', 'self', '.', '__iter__', '(', ')', ':', 'df', '=', 'game', '.', 'dataframe', 'if', 'df', 'is', 'not', 'None', ':', 'frames', '.', 'append', '(', 'df', ')', 'if', 'frames', '==', '[', ']', ':', 'return', 'None', 'return', 'pd', '.', 'concat', '(', 'frames', ')'] | Returns a pandas DataFrame where each row is a representation of the
Game class. Rows are indexed by the boxscore string. | ['Returns', 'a', 'pandas', 'DataFrame', 'where', 'each', 'row', 'is', 'a', 'representation', 'of', 'the', 'Game', 'class', '.', 'Rows', 'are', 'indexed', 'by', 'the', 'boxscore', 'string', '.'] | train | https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/schedule.py#L588-L600 |
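A usage sketch for the dataframe property above; the team abbreviation is a placeholder and the call fetches live data from hockey-reference.com, so it is illustrative rather than something to run unattended.
from sportsreference.nhl.schedule import Schedule
schedule = Schedule("NYR")    # placeholder team abbreviation
df = schedule.dataframe       # one row per game, indexed by boxscore id, or None
if df is not None:
    print(df.head())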
8,663 | inveniosoftware/invenio-pidstore | invenio_pidstore/ext.py | InvenioPIDStore.init_app | def init_app(self, app, minters_entry_point_group=None,
fetchers_entry_point_group=None):
"""Flask application initialization.
Initialize:
* The CLI commands.
* Initialize the logger (Default: `app.debug`).
* Initialize the default admin object link endpoint.
(Default: `{"rec": "recordmetadata.details_view"}` if
`invenio-records` is installed, otherwise `{}`).
* Register the `pid_exists` template filter.
* Initialize extension state.
:param app: The Flask application
:param minters_entry_point_group: The minters entry point group
(Default: None).
:param fetchers_entry_point_group: The fetchers entry point group
(Default: None).
:returns: PIDStore state application.
"""
self.init_config(app)
# Initialize CLI
app.cli.add_command(cmd)
# Initialize logger
app.config.setdefault('PIDSTORE_APP_LOGGER_HANDLERS', app.debug)
if app.config['PIDSTORE_APP_LOGGER_HANDLERS']:
for handler in app.logger.handlers:
logger.addHandler(handler)
# Initialize admin object link endpoints.
try:
pkg_resources.get_distribution('invenio-records')
app.config.setdefault('PIDSTORE_OBJECT_ENDPOINTS', dict(
rec='recordmetadata.details_view',
))
except pkg_resources.DistributionNotFound:
app.config.setdefault('PIDSTORE_OBJECT_ENDPOINTS', {})
# Register template filter
app.jinja_env.filters['pid_exists'] = pid_exists
# Initialize extension state.
state = _PIDStoreState(
app=app,
minters_entry_point_group=minters_entry_point_group,
fetchers_entry_point_group=fetchers_entry_point_group,
)
app.extensions['invenio-pidstore'] = state
return state | python | def init_app(self, app, minters_entry_point_group=None,
fetchers_entry_point_group=None):
"""Flask application initialization.
Initialize:
* The CLI commands.
* Initialize the logger (Default: `app.debug`).
* Initialize the default admin object link endpoint.
(Default: `{"rec": "recordmetadata.details_view"}` if
`invenio-records` is installed, otherwise `{}`).
* Register the `pid_exists` template filter.
* Initialize extension state.
:param app: The Flask application
:param minters_entry_point_group: The minters entry point group
(Default: None).
:param fetchers_entry_point_group: The fetchers entry point group
(Default: None).
:returns: PIDStore state application.
"""
self.init_config(app)
# Initialize CLI
app.cli.add_command(cmd)
# Initialize logger
app.config.setdefault('PIDSTORE_APP_LOGGER_HANDLERS', app.debug)
if app.config['PIDSTORE_APP_LOGGER_HANDLERS']:
for handler in app.logger.handlers:
logger.addHandler(handler)
# Initialize admin object link endpoints.
try:
pkg_resources.get_distribution('invenio-records')
app.config.setdefault('PIDSTORE_OBJECT_ENDPOINTS', dict(
rec='recordmetadata.details_view',
))
except pkg_resources.DistributionNotFound:
app.config.setdefault('PIDSTORE_OBJECT_ENDPOINTS', {})
# Register template filter
app.jinja_env.filters['pid_exists'] = pid_exists
# Initialize extension state.
state = _PIDStoreState(
app=app,
minters_entry_point_group=minters_entry_point_group,
fetchers_entry_point_group=fetchers_entry_point_group,
)
app.extensions['invenio-pidstore'] = state
return state | ['def', 'init_app', '(', 'self', ',', 'app', ',', 'minters_entry_point_group', '=', 'None', ',', 'fetchers_entry_point_group', '=', 'None', ')', ':', 'self', '.', 'init_config', '(', 'app', ')', '# Initialize CLI', 'app', '.', 'cli', '.', 'add_command', '(', 'cmd', ')', '# Initialize logger', 'app', '.', 'config', '.', 'setdefault', '(', "'PIDSTORE_APP_LOGGER_HANDLERS'", ',', 'app', '.', 'debug', ')', 'if', 'app', '.', 'config', '[', "'PIDSTORE_APP_LOGGER_HANDLERS'", ']', ':', 'for', 'handler', 'in', 'app', '.', 'logger', '.', 'handlers', ':', 'logger', '.', 'addHandler', '(', 'handler', ')', '# Initialize admin object link endpoints.', 'try', ':', 'pkg_resources', '.', 'get_distribution', '(', "'invenio-records'", ')', 'app', '.', 'config', '.', 'setdefault', '(', "'PIDSTORE_OBJECT_ENDPOINTS'", ',', 'dict', '(', 'rec', '=', "'recordmetadata.details_view'", ',', ')', ')', 'except', 'pkg_resources', '.', 'DistributionNotFound', ':', 'app', '.', 'config', '.', 'setdefault', '(', "'PIDSTORE_OBJECT_ENDPOINTS'", ',', '{', '}', ')', '# Register template filter', 'app', '.', 'jinja_env', '.', 'filters', '[', "'pid_exists'", ']', '=', 'pid_exists', '# Initialize extension state.', 'state', '=', '_PIDStoreState', '(', 'app', '=', 'app', ',', 'minters_entry_point_group', '=', 'minters_entry_point_group', ',', 'fetchers_entry_point_group', '=', 'fetchers_entry_point_group', ',', ')', 'app', '.', 'extensions', '[', "'invenio-pidstore'", ']', '=', 'state', 'return', 'state'] | Flask application initialization.
Initialize:
* The CLI commands.
* Initialize the logger (Default: `app.debug`).
* Initialize the default admin object link endpoint.
(Default: `{"rec": "recordmetadata.details_view"}` if
`invenio-records` is installed, otherwise `{}`).
* Register the `pid_exists` template filter.
* Initialize extension state.
:param app: The Flask application
:param minters_entry_point_group: The minters entry point group
(Default: None).
:param fetchers_entry_point_group: The fetchers entry point group
(Default: None).
:returns: PIDStore state application. | ['Flask', 'application', 'initialization', '.'] | train | https://github.com/inveniosoftware/invenio-pidstore/blob/8bf35f4e62d5dcaf1a2cfe5803245ba5220a9b78/invenio_pidstore/ext.py#L103-L157 |
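The extension above follows the usual Flask extension pattern; a minimal wiring sketch (application name and the way state is read back are illustrative only).
from flask import Flask
from invenio_pidstore import InvenioPIDStore
app = Flask(__name__)
InvenioPIDStore(app)   # runs init_app(app): CLI, logger, endpoints, state
# The initialized state object is then reachable the usual Flask way:
state = app.extensions["invenio-pidstore"]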
8,664 | materialsproject/pymatgen | pymatgen/entries/entry_tools.py | EntrySet.chemsys | def chemsys(self) -> set:
"""
Returns:
set representing the chemical system, e.g., {"Li", "Fe", "P", "O"}
"""
chemsys = set()
for e in self.entries:
chemsys.update([el.symbol for el in e.composition.keys()])
return chemsys | python | def chemsys(self) -> set:
"""
Returns:
set representing the chemical system, e.g., {"Li", "Fe", "P", "O"}
"""
chemsys = set()
for e in self.entries:
chemsys.update([el.symbol for el in e.composition.keys()])
return chemsys | ['def', 'chemsys', '(', 'self', ')', '->', 'set', ':', 'chemsys', '=', 'set', '(', ')', 'for', 'e', 'in', 'self', '.', 'entries', ':', 'chemsys', '.', 'update', '(', '[', 'el', '.', 'symbol', 'for', 'el', 'in', 'e', '.', 'composition', '.', 'keys', '(', ')', ']', ')', 'return', 'chemsys'] | Returns:
set representing the chemical system, e.g., {"Li", "Fe", "P", "O"} | ['Returns', ':', 'set', 'representing', 'the', 'chemical', 'system', 'e', '.', 'g', '.', '{', 'Li', 'Fe', 'P', 'O', '}'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/entries/entry_tools.py#L179-L187 |
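The {"Li", "Fe", "P", "O"} example from the docstring can be reproduced with a throwaway entry; the formula string and energy below are placeholders, since chemsys only inspects compositions.
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.entries.entry_tools import EntrySet
entries = EntrySet([ComputedEntry("LiFePO4", -191.0)])   # placeholder energy
print(entries.chemsys)   # {'Li', 'Fe', 'P', 'O'} (a set, so unordered)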
8,665 | Stranger6667/postmarker | postmarker/models/emails.py | EmailBatch._construct_email | def _construct_email(self, email, **extra):
"""
Converts incoming data to properly structured dictionary.
"""
if isinstance(email, dict):
email = Email(manager=self._manager, **email)
elif isinstance(email, (MIMEText, MIMEMultipart)):
email = Email.from_mime(email, self._manager)
elif not isinstance(email, Email):
raise ValueError
email._update(extra)
return email.as_dict() | python | def _construct_email(self, email, **extra):
"""
Converts incoming data to properly structured dictionary.
"""
if isinstance(email, dict):
email = Email(manager=self._manager, **email)
elif isinstance(email, (MIMEText, MIMEMultipart)):
email = Email.from_mime(email, self._manager)
elif not isinstance(email, Email):
raise ValueError
email._update(extra)
return email.as_dict() | ['def', '_construct_email', '(', 'self', ',', 'email', ',', '*', '*', 'extra', ')', ':', 'if', 'isinstance', '(', 'email', ',', 'dict', ')', ':', 'email', '=', 'Email', '(', 'manager', '=', 'self', '.', '_manager', ',', '*', '*', 'email', ')', 'elif', 'isinstance', '(', 'email', ',', '(', 'MIMEText', ',', 'MIMEMultipart', ')', ')', ':', 'email', '=', 'Email', '.', 'from_mime', '(', 'email', ',', 'self', '.', '_manager', ')', 'elif', 'not', 'isinstance', '(', 'email', ',', 'Email', ')', ':', 'raise', 'ValueError', 'email', '.', '_update', '(', 'extra', ')', 'return', 'email', '.', 'as_dict', '(', ')'] | Converts incoming data to properly structured dictionary. | ['Converts', 'incoming', 'data', 'to', 'properly', 'structured', 'dictionary', '.'] | train | https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/emails.py#L243-L254 |
8,666 | julienr/meshcut | examples/utils.py | show_plane | def show_plane(orig, n, scale=1.0, **kwargs):
"""
Show the plane with the given origin and normal. scale gives its size
"""
b1 = orthogonal_vector(n)
b1 /= la.norm(b1)
b2 = np.cross(b1, n)
b2 /= la.norm(b2)
verts = [orig + scale*(-b1 - b2),
orig + scale*(b1 - b2),
orig + scale*(b1 + b2),
orig + scale*(-b1 + b2)]
faces = [(0, 1, 2), (0, 2, 3)]
trimesh3d(np.array(verts), faces, **kwargs) | python | def show_plane(orig, n, scale=1.0, **kwargs):
"""
Show the plane with the given origin and normal. scale gives its size
"""
b1 = orthogonal_vector(n)
b1 /= la.norm(b1)
b2 = np.cross(b1, n)
b2 /= la.norm(b2)
verts = [orig + scale*(-b1 - b2),
orig + scale*(b1 - b2),
orig + scale*(b1 + b2),
orig + scale*(-b1 + b2)]
faces = [(0, 1, 2), (0, 2, 3)]
trimesh3d(np.array(verts), faces, **kwargs) | ['def', 'show_plane', '(', 'orig', ',', 'n', ',', 'scale', '=', '1.0', ',', '*', '*', 'kwargs', ')', ':', 'b1', '=', 'orthogonal_vector', '(', 'n', ')', 'b1', '/=', 'la', '.', 'norm', '(', 'b1', ')', 'b2', '=', 'np', '.', 'cross', '(', 'b1', ',', 'n', ')', 'b2', '/=', 'la', '.', 'norm', '(', 'b2', ')', 'verts', '=', '[', 'orig', '+', 'scale', '*', '(', '-', 'b1', '-', 'b2', ')', ',', 'orig', '+', 'scale', '*', '(', 'b1', '-', 'b2', ')', ',', 'orig', '+', 'scale', '*', '(', 'b1', '+', 'b2', ')', ',', 'orig', '+', 'scale', '*', '(', '-', 'b1', '+', 'b2', ')', ']', 'faces', '=', '[', '(', '0', ',', '1', ',', '2', ')', ',', '(', '0', ',', '2', ',', '3', ')', ']', 'trimesh3d', '(', 'np', '.', 'array', '(', 'verts', ')', ',', 'faces', ',', '*', '*', 'kwargs', ')'] | Show the plane with the given origin and normal. scale give its size | ['Show', 'the', 'plane', 'with', 'the', 'given', 'origin', 'and', 'normal', '.', 'scale', 'give', 'its', 'size'] | train | https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/examples/utils.py#L27-L40 |
8,667 | hydpy-dev/hydpy | hydpy/models/dam/dam_derived.py | HighestRemoteSmoothPar.update | def update(self):
"""Calculate the smoothing parameter value.
The following example is explained in some detail in module
|smoothtools|:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotedischarge(1.0)
>>> highestremotetolerance(0.0)
>>> derived.highestremotesmoothpar.update()
>>> from hydpy.cythons.smoothutils import smooth_min1
>>> from hydpy import round_
>>> round_(smooth_min1(-4.0, 1.5, derived.highestremotesmoothpar))
-4.0
>>> highestremotetolerance(2.5)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(-4.0, -1.5, derived.highestremotesmoothpar))
-4.01
Note that the example above corresponds to the example on function
|calc_smoothpar_min1|, due to the value of parameter
|HighestRemoteDischarge| being 1 m³/s. Doubling the value of
|HighestRemoteDischarge| also doubles the value of
|HighestRemoteSmoothPar| proportionally. This leads to the following
result:
>>> highestremotedischarge(2.0)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(-4.0, 1.0, derived.highestremotesmoothpar))
-4.02
This relationship between |HighestRemoteDischarge| and
|HighestRemoteSmoothPar| prevents any smoothing when
the value of |HighestRemoteDischarge| is zero:
>>> highestremotedischarge(0.0)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar))
1.0
In addition, |HighestRemoteSmoothPar| is set to zero if
|HighestRemoteDischarge| is infinity (because no actual value
will ever come in the vicinity of infinity, which is why no
value would be changed through smoothing anyway):
>>> highestremotedischarge(inf)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar))
1.0
"""
control = self.subpars.pars.control
if numpy.isinf(control.highestremotedischarge):
self(0.0)
else:
self(control.highestremotedischarge *
smoothtools.calc_smoothpar_min1(control.highestremotetolerance)
) | python | def update(self):
"""Calculate the smoothing parameter value.
The following example is explained in some detail in module
|smoothtools|:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotedischarge(1.0)
>>> highestremotetolerance(0.0)
>>> derived.highestremotesmoothpar.update()
>>> from hydpy.cythons.smoothutils import smooth_min1
>>> from hydpy import round_
>>> round_(smooth_min1(-4.0, 1.5, derived.highestremotesmoothpar))
-4.0
>>> highestremotetolerance(2.5)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(-4.0, -1.5, derived.highestremotesmoothpar))
-4.01
Note that the example above corresponds to the example on function
|calc_smoothpar_min1|, due to the value of parameter
|HighestRemoteDischarge| being 1 m³/s. Doubling the value of
|HighestRemoteDischarge| also doubles the value of
|HighestRemoteSmoothPar| proportionally. This leads to the following
result:
>>> highestremotedischarge(2.0)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(-4.0, 1.0, derived.highestremotesmoothpar))
-4.02
This relationship between |HighestRemoteDischarge| and
|HighestRemoteSmoothPar| prevents any smoothing when
the value of |HighestRemoteDischarge| is zero:
>>> highestremotedischarge(0.0)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar))
1.0
In addition, |HighestRemoteSmoothPar| is set to zero if
|HighestRemoteDischarge| is infinity (because no actual value
will ever come in the vicinity of infinity, which is why no
value would be changed through smoothing anyway):
>>> highestremotedischarge(inf)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar))
1.0
"""
control = self.subpars.pars.control
if numpy.isinf(control.highestremotedischarge):
self(0.0)
else:
self(control.highestremotedischarge *
smoothtools.calc_smoothpar_min1(control.highestremotetolerance)
) | ['def', 'update', '(', 'self', ')', ':', 'control', '=', 'self', '.', 'subpars', '.', 'pars', '.', 'control', 'if', 'numpy', '.', 'isinf', '(', 'control', '.', 'highestremotedischarge', ')', ':', 'self', '(', '0.0', ')', 'else', ':', 'self', '(', 'control', '.', 'highestremotedischarge', '*', 'smoothtools', '.', 'calc_smoothpar_min1', '(', 'control', '.', 'highestremotetolerance', ')', ')'] | Calculate the smoothing parameter value.
The following example is explained in some detail in module
|smoothtools|:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotedischarge(1.0)
>>> highestremotetolerance(0.0)
>>> derived.highestremotesmoothpar.update()
>>> from hydpy.cythons.smoothutils import smooth_min1
>>> from hydpy import round_
>>> round_(smooth_min1(-4.0, 1.5, derived.highestremotesmoothpar))
-4.0
>>> highestremotetolerance(2.5)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(-4.0, -1.5, derived.highestremotesmoothpar))
-4.01
Note that the example above corresponds to the example on function
|calc_smoothpar_min1|, due to the value of parameter
|HighestRemoteDischarge| being 1 m³/s. Doubling the value of
|HighestRemoteDischarge| also doubles the value of
|HighestRemoteSmoothPar| proportionally. This leads to the following
result:
>>> highestremotedischarge(2.0)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(-4.0, 1.0, derived.highestremotesmoothpar))
-4.02
This relationship between |HighestRemoteDischarge| and
|HighestRemoteSmoothPar| prevents any smoothing when
the value of |HighestRemoteDischarge| is zero:
>>> highestremotedischarge(0.0)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar))
1.0
In addition, |HighestRemoteSmoothPar| is set to zero if
|HighestRemoteDischarge| is infinity (because no actual value
will ever come in the vicinity of infinity, which is why no
value would be changed through smoothing anyway):
>>> highestremotedischarge(inf)
>>> derived.highestremotesmoothpar.update()
>>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar))
1.0 | ['Calculate', 'the', 'smoothing', 'parameter', 'value', '.'] | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_derived.py#L246-L303 |
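The rule documented above, reduced to a plain function outside HydPy's parameter machinery (a sketch; the import path for smoothtools is an assumption, calc_smoothpar_min1 is used as-is):

import numpy
from hydpy.auxs import smoothtools   # assumed location of calc_smoothpar_min1

def highest_remote_smoothpar(discharge, tolerance):
    # no smoothing for an infinite discharge, otherwise scale the
    # tolerance-derived parameter proportionally to the discharge
    if numpy.isinf(discharge):
        return 0.0
    return discharge * smoothtools.calc_smoothpar_min1(tolerance)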
8,668 | google/transitfeed | kmlparser.py | KmlParser.ParseDom | def ParseDom(self, dom, feed):
"""
Parses the given kml dom tree and updates the Google transit feed object.
Args:
dom - kml dom tree
feed - an instance of Schedule class to be updated
"""
shape_num = 0
for node in dom.getElementsByTagName('Placemark'):
p = self.ParsePlacemark(node)
if p.IsPoint():
(lon, lat) = p.coordinates[0]
m = self.stopNameRe.search(p.name)
feed.AddStop(lat, lon, m.group(1))
elif p.IsLine():
self.ConvertPlacemarkToShape(p, feed) | python | def ParseDom(self, dom, feed):
"""
Parses the given kml dom tree and updates the Google transit feed object.
Args:
dom - kml dom tree
feed - an instance of Schedule class to be updated
"""
shape_num = 0
for node in dom.getElementsByTagName('Placemark'):
p = self.ParsePlacemark(node)
if p.IsPoint():
(lon, lat) = p.coordinates[0]
m = self.stopNameRe.search(p.name)
feed.AddStop(lat, lon, m.group(1))
elif p.IsLine():
self.ConvertPlacemarkToShape(p, feed) | ['def', 'ParseDom', '(', 'self', ',', 'dom', ',', 'feed', ')', ':', 'shape_num', '=', '0', 'for', 'node', 'in', 'dom', '.', 'getElementsByTagName', '(', "'Placemark'", ')', ':', 'p', '=', 'self', '.', 'ParsePlacemark', '(', 'node', ')', 'if', 'p', '.', 'IsPoint', '(', ')', ':', '(', 'lon', ',', 'lat', ')', '=', 'p', '.', 'coordinates', '[', '0', ']', 'm', '=', 'self', '.', 'stopNameRe', '.', 'search', '(', 'p', '.', 'name', ')', 'feed', '.', 'AddStop', '(', 'lat', ',', 'lon', ',', 'm', '.', 'group', '(', '1', ')', ')', 'elif', 'p', '.', 'IsLine', '(', ')', ':', 'self', '.', 'ConvertPlacemarkToShape', '(', 'p', ',', 'feed', ')'] | Parses the given kml dom tree and updates the Google transit feed object.
Args:
dom - kml dom tree
feed - an instance of Schedule class to be updated | ['Parses', 'the', 'given', 'kml', 'dom', 'tree', 'and', 'updates', 'the', 'Google', 'transit', 'feed', 'object', '.'] | train | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/kmlparser.py#L73-L89 |
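A hedged usage sketch for ParseDom; the KmlParser import path, the file name, and the GetStopList call are assumptions, while Schedule comes from the transitfeed package itself:

import xml.dom.minidom
import transitfeed
from kmlparser import KmlParser   # assumed import path

feed = transitfeed.Schedule()
dom = xml.dom.minidom.parse('stops_and_shapes.kml')   # placeholder file name
KmlParser().ParseDom(dom, feed)
print(len(feed.GetStopList()), 'stops added from Placemark points')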
8,669 | brainiak/brainiak | brainiak/image.py | mask_image | def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None
) -> np.ndarray:
"""Mask image after optionally casting its type.
Parameters
----------
image
Image to mask. Can include time as the last dimension.
mask
Mask to apply. Must have the same shape as the image data.
data_type
Type to cast image to.
Returns
-------
np.ndarray
Masked image.
Raises
------
ValueError
Image data and masks have different shapes.
"""
image_data = image.get_data()
if image_data.shape[:3] != mask.shape:
raise ValueError("Image data and mask have different shapes.")
if data_type is not None:
cast_data = image_data.astype(data_type)
else:
cast_data = image_data
return cast_data[mask] | python | def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None
) -> np.ndarray:
"""Mask image after optionally casting its type.
Parameters
----------
image
Image to mask. Can include time as the last dimension.
mask
Mask to apply. Must have the same shape as the image data.
data_type
Type to cast image to.
Returns
-------
np.ndarray
Masked image.
Raises
------
ValueError
Image data and masks have different shapes.
"""
image_data = image.get_data()
if image_data.shape[:3] != mask.shape:
raise ValueError("Image data and mask have different shapes.")
if data_type is not None:
cast_data = image_data.astype(data_type)
else:
cast_data = image_data
return cast_data[mask] | ['def', 'mask_image', '(', 'image', ':', 'SpatialImage', ',', 'mask', ':', 'np', '.', 'ndarray', ',', 'data_type', ':', 'type', '=', 'None', ')', '->', 'np', '.', 'ndarray', ':', 'image_data', '=', 'image', '.', 'get_data', '(', ')', 'if', 'image_data', '.', 'shape', '[', ':', '3', ']', '!=', 'mask', '.', 'shape', ':', 'raise', 'ValueError', '(', '"Image data and mask have different shapes."', ')', 'if', 'data_type', 'is', 'not', 'None', ':', 'cast_data', '=', 'image_data', '.', 'astype', '(', 'data_type', ')', 'else', ':', 'cast_data', '=', 'image_data', 'return', 'cast_data', '[', 'mask', ']'] | Mask image after optionally casting its type.
Parameters
----------
image
Image to mask. Can include time as the last dimension.
mask
Mask to apply. Must have the same shape as the image data.
data_type
Type to cast image to.
Returns
-------
np.ndarray
Masked image.
Raises
------
ValueError
Image data and masks have different shapes. | ['Mask', 'image', 'after', 'optionally', 'casting', 'its', 'type', '.'] | train | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/image.py#L107-L137 |
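A hedged usage sketch for mask_image with a 4-D BOLD image and a 3-D boolean mask; the file names are placeholders:

import nibabel as nib
import numpy as np

image = nib.load('bold.nii.gz')                  # data shape (x, y, z, t)
mask = nib.load('mask.nii.gz').get_data() > 0    # boolean, shape (x, y, z)
voxels = mask_image(image, mask, data_type=np.float32)
# voxels has shape (number of True voxels, t); mismatched shapes raise ValueError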
8,670 | fastmonkeys/pontus | pontus/_compat.py | unicode_compatible | def unicode_compatible(cls):
"""
A decorator that defines ``__str__`` and ``__unicode__`` methods
under Python 2.
"""
if not is_py3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls | python | def unicode_compatible(cls):
"""
A decorator that defines ``__str__`` and ``__unicode__`` methods
under Python 2.
"""
if not is_py3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls | ['def', 'unicode_compatible', '(', 'cls', ')', ':', 'if', 'not', 'is_py3', ':', 'cls', '.', '__unicode__', '=', 'cls', '.', '__str__', 'cls', '.', '__str__', '=', 'lambda', 'self', ':', 'self', '.', '__unicode__', '(', ')', '.', 'encode', '(', "'utf-8'", ')', 'return', 'cls'] | A decorator that defines ``__str__`` and ``__unicode__`` methods
under Python 2. | ['A', 'decorator', 'that', 'defines', '__str__', 'and', '__unicode__', 'methods', 'under', 'Python', '2', '.'] | train | https://github.com/fastmonkeys/pontus/blob/cf02fb22c4558b899e2dcbe437a1a525321c4f12/pontus/_compat.py#L25-L33 |
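A usage sketch for the decorator above: define a text-returning __str__ once, and under Python 2 the decorator moves it to __unicode__ and makes __str__ return UTF-8 bytes (under Python 3 it is a no-op):

@unicode_compatible
class Greeting(object):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return u'Hello, {0}!'.format(self.name)

print(str(Greeting('world')))   # text on Python 3, UTF-8 encoded bytes on Python 2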
8,671 | bxlab/bx-python | lib/bx_extras/stats.py | lgeometricmean | def lgeometricmean (inlist):
"""
Calculates the geometric mean of the values in the passed list.
That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
Usage: lgeometricmean(inlist)
"""
mult = 1.0
one_over_n = 1.0/len(inlist)
for item in inlist:
mult = mult * pow(item,one_over_n)
return mult | python | def lgeometricmean (inlist):
"""
Calculates the geometric mean of the values in the passed list.
That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
Usage: lgeometricmean(inlist)
"""
mult = 1.0
one_over_n = 1.0/len(inlist)
for item in inlist:
mult = mult * pow(item,one_over_n)
return mult | ['def', 'lgeometricmean', '(', 'inlist', ')', ':', 'mult', '=', '1.0', 'one_over_n', '=', '1.0', '/', 'len', '(', 'inlist', ')', 'for', 'item', 'in', 'inlist', ':', 'mult', '=', 'mult', '*', 'pow', '(', 'item', ',', 'one_over_n', ')', 'return', 'mult'] | Calculates the geometric mean of the values in the passed list.
That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
Usage: lgeometricmean(inlist) | ['Calculates', 'the', 'geometric', 'mean', 'of', 'the', 'values', 'in', 'the', 'passed', 'list', '.', 'That', 'is', ':', 'n', '-', 'th', 'root', 'of', '(', 'x1', '*', 'x2', '*', '...', '*', 'xn', ')', '.', 'Assumes', 'a', '1D', 'list', '.'] | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L271-L282 |
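An equivalent, less overflow-prone way to compute the same geometric mean via logarithms (a standalone sketch, not part of bx-python; values must be strictly positive):

import math

def geometric_mean(values):
    return math.exp(sum(math.log(v) for v in values) / len(values))

geometric_mean([1.0, 4.0, 16.0])   # 4.0, matching lgeometricmean above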
8,672 | Yelp/kafka-utils | kafka_utils/main.py | parse_args | def parse_args():
"""Parse the arguments."""
parser = argparse.ArgumentParser(
description='Show available clusters.'
)
parser.add_argument(
'-v',
'--version',
action='version',
version="%(prog)s {0}".format(__version__),
)
parser.add_argument(
'--discovery-base-path',
dest='discovery_base_path',
type=str,
help='Path of the directory containing the <cluster_type>.yaml config.'
' Default try: '
'$KAFKA_DISCOVERY_DIR, $HOME/.kafka_discovery, /etc/kafka_discovery',
)
return parser.parse_args() | python | def parse_args():
"""Parse the arguments."""
parser = argparse.ArgumentParser(
description='Show available clusters.'
)
parser.add_argument(
'-v',
'--version',
action='version',
version="%(prog)s {0}".format(__version__),
)
parser.add_argument(
'--discovery-base-path',
dest='discovery_base_path',
type=str,
help='Path of the directory containing the <cluster_type>.yaml config.'
' Default try: '
'$KAFKA_DISCOVERY_DIR, $HOME/.kafka_discovery, /etc/kafka_discovery',
)
return parser.parse_args() | ['def', 'parse_args', '(', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'Show available clusters.'", ')', 'parser', '.', 'add_argument', '(', "'-v'", ',', "'--version'", ',', 'action', '=', "'version'", ',', 'version', '=', '"%(prog)s {0}"', '.', 'format', '(', '__version__', ')', ',', ')', 'parser', '.', 'add_argument', '(', "'--discovery-base-path'", ',', 'dest', '=', "'discovery_base_path'", ',', 'type', '=', 'str', ',', 'help', '=', "'Path of the directory containing the <cluster_type>.yaml config.'", "' Default try: '", "'$KAFKA_DISCOVERY_DIR, $HOME/.kafka_discovery, /etc/kafka_discovery'", ',', ')', 'return', 'parser', '.', 'parse_args', '(', ')'] | Parse the arguments. | ['Parse', 'the', 'arguments', '.'] | train | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/main.py#L29-L49 |
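A short sketch of how the parsed namespace is consumed; the attribute name follows the dest= above, and the path is a placeholder:

args = parse_args()                # e.g. run as: <prog> --discovery-base-path /etc/kafka_discovery
print(args.discovery_base_path)    # None when the flag is omitted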
8,673 | hhatto/autopep8 | autopep8.py | FixPEP8.fix_e702 | def fix_e702(self, result, logical):
"""Put semicolon-separated compound statement on separate lines."""
if not logical:
return [] # pragma: no cover
logical_lines = logical[2]
# Avoid applying this when indented.
# https://docs.python.org/reference/compound_stmts.html
for line in logical_lines:
if (result['id'] == 'E702' and ':' in line
and STARTSWITH_DEF_REGEX.match(line)):
return []
line_index = result['line'] - 1
target = self.source[line_index]
if target.rstrip().endswith('\\'):
# Normalize '1; \\\n2' into '1; 2'.
self.source[line_index] = target.rstrip('\n \r\t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2]
if target.rstrip().endswith(';'):
self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
return [line_index + 1]
offset = result['column'] - 1
first = target[:offset].rstrip(';').rstrip()
second = (_get_indentation(logical_lines[0]) +
target[offset:].lstrip(';').lstrip())
# Find inline comment.
inline_comment = None
if target[offset:].lstrip(';').lstrip()[:2] == '# ':
inline_comment = target[offset:].lstrip(';')
if inline_comment:
self.source[line_index] = first + inline_comment
else:
self.source[line_index] = first + '\n' + second
return [line_index + 1] | python | def fix_e702(self, result, logical):
"""Put semicolon-separated compound statement on separate lines."""
if not logical:
return [] # pragma: no cover
logical_lines = logical[2]
# Avoid applying this when indented.
# https://docs.python.org/reference/compound_stmts.html
for line in logical_lines:
if (result['id'] == 'E702' and ':' in line
and STARTSWITH_DEF_REGEX.match(line)):
return []
line_index = result['line'] - 1
target = self.source[line_index]
if target.rstrip().endswith('\\'):
# Normalize '1; \\\n2' into '1; 2'.
self.source[line_index] = target.rstrip('\n \r\t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2]
if target.rstrip().endswith(';'):
self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
return [line_index + 1]
offset = result['column'] - 1
first = target[:offset].rstrip(';').rstrip()
second = (_get_indentation(logical_lines[0]) +
target[offset:].lstrip(';').lstrip())
# Find inline comment.
inline_comment = None
if target[offset:].lstrip(';').lstrip()[:2] == '# ':
inline_comment = target[offset:].lstrip(';')
if inline_comment:
self.source[line_index] = first + inline_comment
else:
self.source[line_index] = first + '\n' + second
return [line_index + 1] | ['def', 'fix_e702', '(', 'self', ',', 'result', ',', 'logical', ')', ':', 'if', 'not', 'logical', ':', 'return', '[', ']', '# pragma: no cover', 'logical_lines', '=', 'logical', '[', '2', ']', '# Avoid applying this when indented.', '# https://docs.python.org/reference/compound_stmts.html', 'for', 'line', 'in', 'logical_lines', ':', 'if', '(', 'result', '[', "'id'", ']', '==', "'E702'", 'and', "':'", 'in', 'line', 'and', 'STARTSWITH_DEF_REGEX', '.', 'match', '(', 'line', ')', ')', ':', 'return', '[', ']', 'line_index', '=', 'result', '[', "'line'", ']', '-', '1', 'target', '=', 'self', '.', 'source', '[', 'line_index', ']', 'if', 'target', '.', 'rstrip', '(', ')', '.', 'endswith', '(', "'\\\\'", ')', ':', "# Normalize '1; \\\\\\n2' into '1; 2'.", 'self', '.', 'source', '[', 'line_index', ']', '=', 'target', '.', 'rstrip', '(', "'\\n \\r\\t\\\\'", ')', 'self', '.', 'source', '[', 'line_index', '+', '1', ']', '=', 'self', '.', 'source', '[', 'line_index', '+', '1', ']', '.', 'lstrip', '(', ')', 'return', '[', 'line_index', '+', '1', ',', 'line_index', '+', '2', ']', 'if', 'target', '.', 'rstrip', '(', ')', '.', 'endswith', '(', "';'", ')', ':', 'self', '.', 'source', '[', 'line_index', ']', '=', 'target', '.', 'rstrip', '(', "'\\n \\r\\t;'", ')', '+', "'\\n'", 'return', '[', 'line_index', '+', '1', ']', 'offset', '=', 'result', '[', "'column'", ']', '-', '1', 'first', '=', 'target', '[', ':', 'offset', ']', '.', 'rstrip', '(', "';'", ')', '.', 'rstrip', '(', ')', 'second', '=', '(', '_get_indentation', '(', 'logical_lines', '[', '0', ']', ')', '+', 'target', '[', 'offset', ':', ']', '.', 'lstrip', '(', "';'", ')', '.', 'lstrip', '(', ')', ')', '# Find inline comment.', 'inline_comment', '=', 'None', 'if', 'target', '[', 'offset', ':', ']', '.', 'lstrip', '(', "';'", ')', '.', 'lstrip', '(', ')', '[', ':', '2', ']', '==', "'# '", ':', 'inline_comment', '=', 'target', '[', 'offset', ':', ']', '.', 'lstrip', '(', "';'", ')', 'if', 'inline_comment', ':', 'self', '.', 'source', '[', 'line_index', ']', '=', 'first', '+', 'inline_comment', 'else', ':', 'self', '.', 'source', '[', 'line_index', ']', '=', 'first', '+', "'\\n'", '+', 'second', 'return', '[', 'line_index', '+', '1', ']'] | Put semicolon-separated compound statement on separate lines. | ['Put', 'semicolon', '-', 'separated', 'compound', 'statement', 'on', 'separate', 'lines', '.'] | train | https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1001-L1041 |
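The user-visible effect of the E702 fixer, driven through autopep8's public fix_code helper (passing options as a dict is an assumption about the installed version):

import autopep8

print(autopep8.fix_code('x = 1; y = 2\n', options={'select': ['E702']}))
# x = 1
# y = 2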
8,674 | jjmontesl/python-clementine-remote | clementineremote/clementine.py | ClementineRemote.set_volume | def set_volume(self, volume):
"""
Sets player volume (note, this does not change host computer main volume).
"""
msg = cr.Message()
msg.type = cr.SET_VOLUME
msg.request_set_volume.volume = int(volume)
self.send_message(msg) | python | def set_volume(self, volume):
"""
Sets player volume (note, this does not change host computer main volume).
"""
msg = cr.Message()
msg.type = cr.SET_VOLUME
msg.request_set_volume.volume = int(volume)
self.send_message(msg) | ['def', 'set_volume', '(', 'self', ',', 'volume', ')', ':', 'msg', '=', 'cr', '.', 'Message', '(', ')', 'msg', '.', 'type', '=', 'cr', '.', 'SET_VOLUME', 'msg', '.', 'request_set_volume', '.', 'volume', '=', 'int', '(', 'volume', ')', 'self', '.', 'send_message', '(', 'msg', ')'] | Sets player volume (note, this does not change host computer main volume). | ['Sets', 'player', 'volume', '(', 'note', 'this', 'does', 'not', 'change', 'host', 'computer', 'main', 'volume', ')', '.'] | train | https://github.com/jjmontesl/python-clementine-remote/blob/af5198f8bb56a4845f4e081fd8a553f935c94cde/clementineremote/clementine.py#L195-L202 |
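A hedged usage sketch for set_volume; the constructor arguments are assumptions about the package defaults, not taken from the record:

from clementineremote import ClementineRemote   # assumed public import

player = ClementineRemote(host='127.0.0.1', port=5500)
player.set_volume(35)   # 0-100; changes the player volume, not the OS mixer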
8,675 | gwastro/pycbc | pycbc/inference/models/base.py | BaseModel.update | def update(self, **params):
"""Updates the current parameter positions and resets stats.
If any sampling transforms are specified, they are applied to the
params before being stored.
"""
self._current_params = self._transform_params(**params)
self._current_stats = ModelStats() | python | def update(self, **params):
"""Updates the current parameter positions and resets stats.
If any sampling transforms are specified, they are applied to the
params before being stored.
"""
self._current_params = self._transform_params(**params)
self._current_stats = ModelStats() | ['def', 'update', '(', 'self', ',', '*', '*', 'params', ')', ':', 'self', '.', '_current_params', '=', 'self', '.', '_transform_params', '(', '*', '*', 'params', ')', 'self', '.', '_current_stats', '=', 'ModelStats', '(', ')'] | Updates the current parameter positions and resets stats.
If any sampling transforms are specified, they are applied to the
params before being stored. | ['Updates', 'the', 'current', 'parameter', 'positions', 'and', 'resets', 'stats', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base.py#L409-L416 |
8,676 | pypa/pipenv | pipenv/vendor/shellingham/nt.py | _iter_process | def _iter_process():
"""Iterate through processes, yielding process ID and properties of each.
Example usage::
>>> for pid, info in _iter_process():
... print(pid, '->', info)
1509 -> {'parent_pid': 1201, 'executable': 'python.exe'}
"""
# TODO: Process32{First,Next} does not return full executable path, only
# the name. To get the full path, Module32{First,Next} is needed, but that
# does not contain parent process information. We probably need to call
# BOTH to build the correct process tree.
h_process = windll.kernel32.CreateToolhelp32Snapshot(
2, # dwFlags=TH32CS_SNAPPROCESS (include all processes).
0, # th32ProcessID=0 (the current process).
)
if h_process == INVALID_HANDLE_VALUE:
raise WinError()
pe = PROCESSENTRY32()
pe.dwSize = sizeof(PROCESSENTRY32)
success = windll.kernel32.Process32First(h_process, byref(pe))
while True:
if not success:
errcode = windll.kernel32.GetLastError()
if errcode == ERROR_NO_MORE_FILES:
# No more processes to iterate through, we're done here.
return
elif errcode == ERROR_INSUFFICIENT_BUFFER:
# This is likely because the file path is longer than the
# Windows limit. Just ignore it, it's likely not what we're
# looking for. We can fix this when it actually matters. (#8)
continue
raise WinError()
# The executable name would be encoded with the current code page if
# we're in ANSI mode (usually). Try to decode it into str/unicode,
# replacing invalid characters to be safe (not theoretically necessary,
# I think). Note that we need to use 'mbcs' instead of encoding
# settings from sys because this is from the Windows API, not Python
# internals (which those settings reflect). (pypa/pipenv#3382)
executable = pe.szExeFile
if isinstance(executable, bytes):
executable = executable.decode('mbcs', 'replace')
info = {'executable': executable}
if pe.th32ParentProcessID:
info['parent_pid'] = pe.th32ParentProcessID
yield pe.th32ProcessID, info
success = windll.kernel32.Process32Next(h_process, byref(pe)) | python | def _iter_process():
"""Iterate through processes, yielding process ID and properties of each.
Example usage::
>>> for pid, info in _iter_process():
... print(pid, '->', info)
1509 -> {'parent_pid': 1201, 'executable': 'python.exe'}
"""
# TODO: Process32{First,Next} does not return full executable path, only
# the name. To get the full path, Module32{First,Next} is needed, but that
# does not contain parent process information. We probably need to call
# BOTH to build the correct process tree.
h_process = windll.kernel32.CreateToolhelp32Snapshot(
2, # dwFlags=TH32CS_SNAPPROCESS (include all processes).
0, # th32ProcessID=0 (the current process).
)
if h_process == INVALID_HANDLE_VALUE:
raise WinError()
pe = PROCESSENTRY32()
pe.dwSize = sizeof(PROCESSENTRY32)
success = windll.kernel32.Process32First(h_process, byref(pe))
while True:
if not success:
errcode = windll.kernel32.GetLastError()
if errcode == ERROR_NO_MORE_FILES:
# No more processes to iterate through, we're done here.
return
elif errcode == ERROR_INSUFFICIENT_BUFFER:
# This is likely because the file path is longer than the
# Windows limit. Just ignore it, it's likely not what we're
# looking for. We can fix this when it actually matters. (#8)
continue
raise WinError()
# The executable name would be encoded with the current code page if
# we're in ANSI mode (usually). Try to decode it into str/unicode,
# replacing invalid characters to be safe (not theoretically necessary,
# I think). Note that we need to use 'mbcs' instead of encoding
# settings from sys because this is from the Windows API, not Python
# internals (which those settings reflect). (pypa/pipenv#3382)
executable = pe.szExeFile
if isinstance(executable, bytes):
executable = executable.decode('mbcs', 'replace')
info = {'executable': executable}
if pe.th32ParentProcessID:
info['parent_pid'] = pe.th32ParentProcessID
yield pe.th32ProcessID, info
success = windll.kernel32.Process32Next(h_process, byref(pe)) | ['def', '_iter_process', '(', ')', ':', '# TODO: Process32{First,Next} does not return full executable path, only', '# the name. To get the full path, Module32{First,Next} is needed, but that', '# does not contain parent process information. We probably need to call', '# BOTH to build the correct process tree.', 'h_process', '=', 'windll', '.', 'kernel32', '.', 'CreateToolhelp32Snapshot', '(', '2', ',', '# dwFlags=TH32CS_SNAPPROCESS (include all processes).', '0', ',', '# th32ProcessID=0 (the current process).', ')', 'if', 'h_process', '==', 'INVALID_HANDLE_VALUE', ':', 'raise', 'WinError', '(', ')', 'pe', '=', 'PROCESSENTRY32', '(', ')', 'pe', '.', 'dwSize', '=', 'sizeof', '(', 'PROCESSENTRY32', ')', 'success', '=', 'windll', '.', 'kernel32', '.', 'Process32First', '(', 'h_process', ',', 'byref', '(', 'pe', ')', ')', 'while', 'True', ':', 'if', 'not', 'success', ':', 'errcode', '=', 'windll', '.', 'kernel32', '.', 'GetLastError', '(', ')', 'if', 'errcode', '==', 'ERROR_NO_MORE_FILES', ':', "# No more processes to iterate through, we're done here.", 'return', 'elif', 'errcode', '==', 'ERROR_INSUFFICIENT_BUFFER', ':', '# This is likely because the file path is longer than the', "# Windows limit. Just ignore it, it's likely not what we're", '# looking for. We can fix this when it actually matters. (#8)', 'continue', 'raise', 'WinError', '(', ')', '# The executable name would be encoded with the current code page if', "# we're in ANSI mode (usually). Try to decode it into str/unicode,", '# replacing invalid characters to be safe (not thoeratically necessary,', "# I think). Note that we need to use 'mbcs' instead of encoding", '# settings from sys because this is from the Windows API, not Python', '# internals (which those settings reflect). (pypa/pipenv#3382)', 'executable', '=', 'pe', '.', 'szExeFile', 'if', 'isinstance', '(', 'executable', ',', 'bytes', ')', ':', 'executable', '=', 'executable', '.', 'decode', '(', "'mbcs'", ',', "'replace'", ')', 'info', '=', '{', "'executable'", ':', 'executable', '}', 'if', 'pe', '.', 'th32ParentProcessID', ':', 'info', '[', "'parent_pid'", ']', '=', 'pe', '.', 'th32ParentProcessID', 'yield', 'pe', '.', 'th32ProcessID', ',', 'info', 'success', '=', 'windll', '.', 'kernel32', '.', 'Process32Next', '(', 'h_process', ',', 'byref', '(', 'pe', ')', ')'] | Iterate through processes, yielding process ID and properties of each.
Example usage::
>>> for pid, info in _iter_process():
... print(pid, '->', info)
1509 -> {'parent_pid': 1201, 'executable': 'python.exe'} | ['Iterate', 'through', 'processes', 'yielding', 'process', 'ID', 'and', 'properties', 'of', 'each', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/shellingham/nt.py#L44-L93 |
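A sketch of how a shell detector typically consumes _iter_process(): build a pid-to-info map and walk up the parent chain (Windows only, since the iterator relies on windll):

import os

processes = dict(_iter_process())
pid, seen = os.getpid(), set()
while pid in processes and pid not in seen:
    seen.add(pid)
    info = processes[pid]
    print(pid, info['executable'])
    pid = info.get('parent_pid')   # loop ends when a parent is missing or repeats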
8,677 | pandas-dev/pandas | pandas/core/indexing.py | _NDFrameIndexer._validate_read_indexer | def _validate_read_indexer(self, key, indexer, axis, raise_missing=False):
"""
Check that indexer can be used to return a result (e.g. at least one
element was found, unless the list of keys was actually empty).
Parameters
----------
key : list-like
Target labels (only used to show correct error message)
indexer: array-like of booleans
Indices corresponding to the key (with -1 indicating not found)
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
"""
ax = self.obj._get_axis(axis)
if len(key) == 0:
return
# Count missing values:
missing = (indexer < 0).sum()
if missing:
if missing == len(indexer):
raise KeyError(
"None of [{key}] are in the [{axis}]".format(
key=key, axis=self.obj._get_axis_name(axis)))
# We (temporarily) allow for some missing keys with .loc, except in
# some cases (e.g. setting) in which "raise_missing" will be False
if not(self.name == 'loc' and not raise_missing):
not_found = list(set(key) - set(ax))
raise KeyError("{} not in index".format(not_found))
# we skip the warning on Categorical/Interval
# as this check is actually done (check for
# non-missing values), but a bit later in the
# code, so we want to avoid warning & then
# just raising
_missing_key_warning = textwrap.dedent("""
Passing list-likes to .loc or [] with any missing label will raise
KeyError in the future, you can use .reindex() as an alternative.
See the documentation here:
https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa
if not (ax.is_categorical() or ax.is_interval()):
warnings.warn(_missing_key_warning,
FutureWarning, stacklevel=6) | python | def _validate_read_indexer(self, key, indexer, axis, raise_missing=False):
"""
Check that indexer can be used to return a result (e.g. at least one
element was found, unless the list of keys was actually empty).
Parameters
----------
key : list-like
Target labels (only used to show correct error message)
indexer: array-like of booleans
Indices corresponding to the key (with -1 indicating not found)
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
"""
ax = self.obj._get_axis(axis)
if len(key) == 0:
return
# Count missing values:
missing = (indexer < 0).sum()
if missing:
if missing == len(indexer):
raise KeyError(
"None of [{key}] are in the [{axis}]".format(
key=key, axis=self.obj._get_axis_name(axis)))
# We (temporarily) allow for some missing keys with .loc, except in
# some cases (e.g. setting) in which "raise_missing" will be False
if not(self.name == 'loc' and not raise_missing):
not_found = list(set(key) - set(ax))
raise KeyError("{} not in index".format(not_found))
# we skip the warning on Categorical/Interval
# as this check is actually done (check for
# non-missing values), but a bit later in the
# code, so we want to avoid warning & then
# just raising
_missing_key_warning = textwrap.dedent("""
Passing list-likes to .loc or [] with any missing label will raise
KeyError in the future, you can use .reindex() as an alternative.
See the documentation here:
https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa
if not (ax.is_categorical() or ax.is_interval()):
warnings.warn(_missing_key_warning,
FutureWarning, stacklevel=6) | ['def', '_validate_read_indexer', '(', 'self', ',', 'key', ',', 'indexer', ',', 'axis', ',', 'raise_missing', '=', 'False', ')', ':', 'ax', '=', 'self', '.', 'obj', '.', '_get_axis', '(', 'axis', ')', 'if', 'len', '(', 'key', ')', '==', '0', ':', 'return', '# Count missing values:', 'missing', '=', '(', 'indexer', '<', '0', ')', '.', 'sum', '(', ')', 'if', 'missing', ':', 'if', 'missing', '==', 'len', '(', 'indexer', ')', ':', 'raise', 'KeyError', '(', '"None of [{key}] are in the [{axis}]"', '.', 'format', '(', 'key', '=', 'key', ',', 'axis', '=', 'self', '.', 'obj', '.', '_get_axis_name', '(', 'axis', ')', ')', ')', '# We (temporarily) allow for some missing keys with .loc, except in', '# some cases (e.g. setting) in which "raise_missing" will be False', 'if', 'not', '(', 'self', '.', 'name', '==', "'loc'", 'and', 'not', 'raise_missing', ')', ':', 'not_found', '=', 'list', '(', 'set', '(', 'key', ')', '-', 'set', '(', 'ax', ')', ')', 'raise', 'KeyError', '(', '"{} not in index"', '.', 'format', '(', 'not_found', ')', ')', '# we skip the warning on Categorical/Interval', '# as this check is actually done (check for', '# non-missing values), but a bit later in the', '# code, so we want to avoid warning & then', '# just raising', '_missing_key_warning', '=', 'textwrap', '.', 'dedent', '(', '"""\n Passing list-likes to .loc or [] with any missing label will raise\n KeyError in the future, you can use .reindex() as an alternative.\n\n See the documentation here:\n https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike"""', ')', '# noqa', 'if', 'not', '(', 'ax', '.', 'is_categorical', '(', ')', 'or', 'ax', '.', 'is_interval', '(', ')', ')', ':', 'warnings', '.', 'warn', '(', '_missing_key_warning', ',', 'FutureWarning', ',', 'stacklevel', '=', '6', ')'] | Check that indexer can be used to return a result (e.g. at least one
element was found, unless the list of keys was actually empty).
Parameters
----------
key : list-like
Target labels (only used to show correct error message)
indexer: array-like of booleans
Indices corresponding to the key (with -1 indicating not found)
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True. | ['Check', 'that', 'indexer', 'can', 'be', 'used', 'to', 'return', 'a', 'result', '(', 'e', '.', 'g', '.', 'at', 'least', 'one', 'element', 'was', 'found', 'unless', 'the', 'list', 'of', 'keys', 'was', 'actually', 'empty', ')', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L1213-L1273 |
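The user-visible behaviour this check enforces, sketched for the pandas version the row is taken from:

import pandas as pd

s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
s.loc[['a', 'z']]       # FutureWarning; 'z' comes back as NaN
s.reindex(['a', 'z'])   # the forward-compatible spelling suggested by the warning
try:
    s.loc[['x', 'z']]   # none of the requested labels exist
except KeyError as exc:
    print(exc)          # None of [...] are in the [index]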
8,678 | epfl-lts2/pygsp | pygsp/graphs/_io.py | IOMixIn.from_networkx | def from_networkx(cls, graph, weight='weight'):
r"""Import a graph from NetworkX.
Edge weights are retrieved as an edge attribute,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`networkx.Graph`
A NetworkX graph object.
weight : string or None, optional
The edge attribute that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
The nodes are ordered according to :meth:`networkx.Graph.nodes`.
In NetworkX, node attributes need not be set for every node.
If a node attribute is not set for a node, a NaN is assigned to the
corresponding signal for that node.
If the graph is a :class:`networkx.MultiGraph`, multiedges are
aggregated by summation.
See Also
--------
from_graphtool : import from graph-tool
load : load from a file
Examples
--------
>>> import networkx as nx
>>> graph = nx.Graph()
>>> graph.add_edge(1, 2, weight=0.2)
>>> graph.add_edge(2, 3, weight=0.9)
>>> graph.add_node(4, sig=3.1416)
>>> graph.nodes()
NodeView((1, 2, 3, 4))
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': array([ nan, nan, nan, 3.1416])}
"""
nx = _import_networkx()
from .graph import Graph
adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
graph_pg = Graph(adjacency)
for i, node in enumerate(graph.nodes()):
for name in graph.nodes[node].keys():
try:
signal = graph_pg.signals[name]
except KeyError:
signal = np.full(graph_pg.n_vertices, np.nan)
graph_pg.set_signal(signal, name)
try:
signal[i] = graph.nodes[node][name]
except KeyError:
pass # attribute not set for node
graph_pg._join_signals()
return graph_pg | python | def from_networkx(cls, graph, weight='weight'):
r"""Import a graph from NetworkX.
Edge weights are retrieved as an edge attribute,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`networkx.Graph`
A NetworkX graph object.
weight : string or None, optional
The edge attribute that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
The nodes are ordered according to :meth:`networkx.Graph.nodes`.
In NetworkX, node attributes need not be set for every node.
If a node attribute is not set for a node, a NaN is assigned to the
corresponding signal for that node.
If the graph is a :class:`networkx.MultiGraph`, multiedges are
aggregated by summation.
See Also
--------
from_graphtool : import from graph-tool
load : load from a file
Examples
--------
>>> import networkx as nx
>>> graph = nx.Graph()
>>> graph.add_edge(1, 2, weight=0.2)
>>> graph.add_edge(2, 3, weight=0.9)
>>> graph.add_node(4, sig=3.1416)
>>> graph.nodes()
NodeView((1, 2, 3, 4))
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': array([ nan, nan, nan, 3.1416])}
"""
nx = _import_networkx()
from .graph import Graph
adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
graph_pg = Graph(adjacency)
for i, node in enumerate(graph.nodes()):
for name in graph.nodes[node].keys():
try:
signal = graph_pg.signals[name]
except KeyError:
signal = np.full(graph_pg.n_vertices, np.nan)
graph_pg.set_signal(signal, name)
try:
signal[i] = graph.nodes[node][name]
except KeyError:
pass # attribute not set for node
graph_pg._join_signals()
return graph_pg | ['def', 'from_networkx', '(', 'cls', ',', 'graph', ',', 'weight', '=', "'weight'", ')', ':', 'nx', '=', '_import_networkx', '(', ')', 'from', '.', 'graph', 'import', 'Graph', 'adjacency', '=', 'nx', '.', 'to_scipy_sparse_matrix', '(', 'graph', ',', 'weight', '=', 'weight', ')', 'graph_pg', '=', 'Graph', '(', 'adjacency', ')', 'for', 'i', ',', 'node', 'in', 'enumerate', '(', 'graph', '.', 'nodes', '(', ')', ')', ':', 'for', 'name', 'in', 'graph', '.', 'nodes', '[', 'node', ']', '.', 'keys', '(', ')', ':', 'try', ':', 'signal', '=', 'graph_pg', '.', 'signals', '[', 'name', ']', 'except', 'KeyError', ':', 'signal', '=', 'np', '.', 'full', '(', 'graph_pg', '.', 'n_vertices', ',', 'np', '.', 'nan', ')', 'graph_pg', '.', 'set_signal', '(', 'signal', ',', 'name', ')', 'try', ':', 'signal', '[', 'i', ']', '=', 'graph', '.', 'nodes', '[', 'node', ']', '[', 'name', ']', 'except', 'KeyError', ':', 'pass', '# attribute not set for node', 'graph_pg', '.', '_join_signals', '(', ')', 'return', 'graph_pg'] | r"""Import a graph from NetworkX.
Edge weights are retrieved as an edge attribute,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`networkx.Graph`
A NetworkX graph object.
weight : string or None, optional
The edge attribute that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
The nodes are ordered according to :meth:`networkx.Graph.nodes`.
In NetworkX, node attributes need not be set for every node.
If a node attribute is not set for a node, a NaN is assigned to the
corresponding signal for that node.
If the graph is a :class:`networkx.MultiGraph`, multiedges are
aggregated by summation.
See Also
--------
from_graphtool : import from graph-tool
load : load from a file
Examples
--------
>>> import networkx as nx
>>> graph = nx.Graph()
>>> graph.add_edge(1, 2, weight=0.2)
>>> graph.add_edge(2, 3, weight=0.9)
>>> graph.add_node(4, sig=3.1416)
>>> graph.nodes()
NodeView((1, 2, 3, 4))
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': array([ nan, nan, nan, 3.1416])} | ['r', 'Import', 'a', 'graph', 'from', 'NetworkX', '.'] | train | https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/graphs/_io.py#L252-L330 |
8,679 | StackStorm/pybind | pybind/slxos/v17r_1_01a/interface/port_channel/qos/random_detect/__init__.py | random_detect._set_traffic_class | def _set_traffic_class(self, v, load=False):
"""
Setter method for traffic_class, mapped from YANG variable /interface/port_channel/qos/random_detect/traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_class() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """traffic_class must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__traffic_class = t
if hasattr(self, '_set'):
self._set() | python | def _set_traffic_class(self, v, load=False):
"""
Setter method for traffic_class, mapped from YANG variable /interface/port_channel/qos/random_detect/traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_class() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """traffic_class must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__traffic_class = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_traffic_class', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"red_tc_value red_dp_value"', ',', 'traffic_class', '.', 'traffic_class', ',', 'yang_name', '=', '"traffic-class"', ',', 'rest_name', '=', '"traffic-class"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'red-tc-value red-dp-value'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'traffic-class to configure RED on'", ',', "u'cli-no-key-completion'", ':', 'None', ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-suppress-no'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'cli-run-template-enter'", ':', "u'$(.?:)'", ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"traffic-class"', ',', 'rest_name', '=', '"traffic-class"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'traffic-class to configure RED on'", ',', "u'cli-no-key-completion'", ':', 'None', ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-suppress-no'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'cli-run-template-enter'", ':', "u'$(.?:)'", ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-qos-mls'", ',', 'defining_module', '=', "'brocade-qos-mls'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""traffic_class must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'red-tc-value red-dp-value\', extensions={u\'tailf-common\': {u\'info\': u\'traffic-class to configure RED on\', u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'cli-suppress-no\': None, u\'cli-suppress-list-no\': None, u\'cli-run-template-enter\': u\'$(.?:)\', u\'cli-sequence-commands\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None}}), is_container=\'list\', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'traffic-class to configure RED on\', u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'cli-suppress-no\': None, u\'cli-suppress-list-no\': None, u\'cli-run-template-enter\': u\'$(.?:)\', u\'cli-sequence-commands\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None}}, 
namespace=\'urn:brocade.com:mgmt:brocade-qos-mls\', defining_module=\'brocade-qos-mls\', yang_type=\'list\', is_config=True)"""', ',', '}', ')', 'self', '.', '__traffic_class', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for traffic_class, mapped from YANG variable /interface/port_channel/qos/random_detect/traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_class() directly. | ['Setter', 'method', 'for', 'traffic_class', 'mapped', 'from', 'YANG', 'variable', '/', 'interface', '/', 'port_channel', '/', 'qos', '/', 'random_detect', '/', 'traffic_class', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_traffic_class', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_traffic_class', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/interface/port_channel/qos/random_detect/__init__.py#L92-L113 |
8,680 | Opentrons/opentrons | scripts/python_build_utils.py | dump_br_version | def dump_br_version(project):
""" Dump an enhanced version json including
- The version from package.json
- The current branch (if it can be found)
- The current sha
"""
normalized = get_version(project)
sha = subprocess.check_output(
['git', 'rev-parse', 'HEAD'], cwd=HERE).strip()
branch = subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=HERE).strip()
pref = br_version_prefixes[project]
return json.dumps({pref+'_version': normalized,
pref+'_sha': sha,
pref+'_branch': branch}) | python | def dump_br_version(project):
""" Dump an enhanced version json including
- The version from package.json
- The current branch (if it can be found)
- The current sha
"""
normalized = get_version(project)
sha = subprocess.check_output(
['git', 'rev-parse', 'HEAD'], cwd=HERE).strip()
branch = subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=HERE).strip()
pref = br_version_prefixes[project]
return json.dumps({pref+'_version': normalized,
pref+'_sha': sha,
pref+'_branch': branch}) | ['def', 'dump_br_version', '(', 'project', ')', ':', 'normalized', '=', 'get_version', '(', 'project', ')', 'sha', '=', 'subprocess', '.', 'check_output', '(', '[', "'git'", ',', "'rev-parse'", ',', "'HEAD'", ']', ',', 'cwd', '=', 'HERE', ')', '.', 'strip', '(', ')', 'branch', '=', 'subprocess', '.', 'check_output', '(', '[', "'git'", ',', "'rev-parse'", ',', "'--abbrev-ref'", ',', "'HEAD'", ']', ',', 'cwd', '=', 'HERE', ')', '.', 'strip', '(', ')', 'pref', '=', 'br_version_prefixes', '[', 'project', ']', 'return', 'json', '.', 'dumps', '(', '{', 'pref', '+', "'_version'", ':', 'normalized', ',', 'pref', '+', "'_sha'", ':', 'sha', ',', 'pref', '+', "'_branch'", ':', 'branch', '}', ')'] | Dump an enhanced version json including
- The version from package.json
- The current branch (if it can be found)
- The current sha | ['Dump', 'an', 'enhanced', 'version', 'json', 'including', '-', 'The', 'version', 'from', 'package', '.', 'json', '-', 'The', 'current', 'branch', '(', 'if', 'it', 'can', 'be', 'found', ')', '-', 'The', 'current', 'sha'] | train | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/scripts/python_build_utils.py#L47-L61 |
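One caveat worth noting: subprocess.check_output returns bytes, which json.dumps cannot serialise on Python 3, so a decoded variant of the same lookups might look like this sketch (get_version, the cwd and the prefix are reused from the row above):

import json
import subprocess

def _git(args, cwd):
    return subprocess.check_output(args, cwd=cwd).strip().decode('utf-8')

def dump_br_version_decoded(project, cwd, prefix):
    payload = {prefix + '_version': get_version(project),
               prefix + '_sha': _git(['git', 'rev-parse', 'HEAD'], cwd),
               prefix + '_branch': _git(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd)}
    return json.dumps(payload)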
8,681 | jcrist/skein | skein/ui.py | ProxiedPage.address | def address(self):
"""The full proxied address to this page"""
path = urlsplit(self.target).path
suffix = '/' if not path or path.endswith('/') else ''
return '%s%s/%s%s' % (self._ui_address[:-1], self._proxy_prefix,
self.route, suffix) | python | def address(self):
"""The full proxied address to this page"""
path = urlsplit(self.target).path
suffix = '/' if not path or path.endswith('/') else ''
return '%s%s/%s%s' % (self._ui_address[:-1], self._proxy_prefix,
self.route, suffix) | ['def', 'address', '(', 'self', ')', ':', 'path', '=', 'urlsplit', '(', 'self', '.', 'target', ')', '.', 'path', 'suffix', '=', "'/'", 'if', 'not', 'path', 'or', 'path', '.', 'endswith', '(', "'/'", ')', 'else', "''", 'return', "'%s%s/%s%s'", '%', '(', 'self', '.', '_ui_address', '[', ':', '-', '1', ']', ',', 'self', '.', '_proxy_prefix', ',', 'self', '.', 'route', ',', 'suffix', ')'] | The full proxied address to this page | ['The', 'full', 'proxied', 'address', 'to', 'this', 'page'] | train | https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/skein/ui.py#L43-L48 |
8,682 | guaix-ucm/pyemir | emirdrp/processing/wavecal/slitlet2d.py | Slitlet2D.extract_slitlet2d | def extract_slitlet2d(self, image_2k2k):
"""Extract slitlet 2d image from image with original EMIR dimensions.
Parameters
----------
image_2k2k : numpy array
Original image (dimensions EMIR_NAXIS1 * EMIR_NAXIS2)
Returns
-------
slitlet2d : numpy array
Image corresponding to the slitlet region defined by its
bounding box.
"""
# protections
naxis2, naxis1 = image_2k2k.shape
if naxis1 != EMIR_NAXIS1:
raise ValueError('Unexpected naxis1')
if naxis2 != EMIR_NAXIS2:
raise ValueError('Unexpected naxis2')
# extract slitlet region
slitlet2d = image_2k2k[(self.bb_ns1_orig - 1):self.bb_ns2_orig,
(self.bb_nc1_orig - 1):self.bb_nc2_orig]
# transform to float
slitlet2d = slitlet2d.astype(np.float)
# display slitlet2d with boundaries and middle spectrum trail
if abs(self.debugplot) in [21, 22]:
self.ximshow_unrectified(slitlet2d)
# return slitlet image
return slitlet2d | python | def extract_slitlet2d(self, image_2k2k):
"""Extract slitlet 2d image from image with original EMIR dimensions.
Parameters
----------
image_2k2k : numpy array
Original image (dimensions EMIR_NAXIS1 * EMIR_NAXIS2)
Returns
-------
slitlet2d : numpy array
Image corresponding to the slitlet region defined by its
bounding box.
"""
# protections
naxis2, naxis1 = image_2k2k.shape
if naxis1 != EMIR_NAXIS1:
raise ValueError('Unexpected naxis1')
if naxis2 != EMIR_NAXIS2:
raise ValueError('Unexpected naxis2')
# extract slitlet region
slitlet2d = image_2k2k[(self.bb_ns1_orig - 1):self.bb_ns2_orig,
(self.bb_nc1_orig - 1):self.bb_nc2_orig]
# transform to float
slitlet2d = slitlet2d.astype(np.float)
# display slitlet2d with boundaries and middle spectrum trail
if abs(self.debugplot) in [21, 22]:
self.ximshow_unrectified(slitlet2d)
# return slitlet image
return slitlet2d | ['def', 'extract_slitlet2d', '(', 'self', ',', 'image_2k2k', ')', ':', '# protections', 'naxis2', ',', 'naxis1', '=', 'image_2k2k', '.', 'shape', 'if', 'naxis1', '!=', 'EMIR_NAXIS1', ':', 'raise', 'ValueError', '(', "'Unexpected naxis1'", ')', 'if', 'naxis2', '!=', 'EMIR_NAXIS2', ':', 'raise', 'ValueError', '(', "'Unexpected naxis2'", ')', '# extract slitlet region', 'slitlet2d', '=', 'image_2k2k', '[', '(', 'self', '.', 'bb_ns1_orig', '-', '1', ')', ':', 'self', '.', 'bb_ns2_orig', ',', '(', 'self', '.', 'bb_nc1_orig', '-', '1', ')', ':', 'self', '.', 'bb_nc2_orig', ']', '# transform to float', 'slitlet2d', '=', 'slitlet2d', '.', 'astype', '(', 'np', '.', 'float', ')', '# display slitlet2d with boundaries and middle spectrum trail', 'if', 'abs', '(', 'self', '.', 'debugplot', ')', 'in', '[', '21', ',', '22', ']', ':', 'self', '.', 'ximshow_unrectified', '(', 'slitlet2d', ')', '# return slitlet image', 'return', 'slitlet2d'] | Extract slitlet 2d image from image with original EMIR dimensions.
Parameters
----------
image_2k2k : numpy array
Original image (dimensions EMIR_NAXIS1 * EMIR_NAXIS2)
Returns
-------
slitlet2d : numpy array
Image corresponding to the slitlet region defined by its
bounding box. | ['Extract', 'slitlet', '2d', 'image', 'from', 'image', 'with', 'original', 'EMIR', 'dimensions', '.'] | train | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/slitlet2d.py#L316-L351 |
8,683 | noahbenson/neuropythy | neuropythy/geometry/util.py | prism_barycentric_coordinates | def prism_barycentric_coordinates(tri1, tri2, pt):
'''
prism_barycentric_coordinates(tri1, tri2, point) yields a list of weights for each vertex
in the given tetrahedron in the same order as the vertices given. If all weights are 0, then
the point is not inside the tetrahedron. The returned weights are (a,b,d) in a numpy array;
the values a, b, and c are the barycentric coordinates corresponding to the three points of
the triangles (where c = (1 - a - b) and the value d is the fractional distance (in the range
[0,1]) of the point between tri1 (d=0) and tri2 (d=1).
'''
pt = np.asarray(pt)
tri1 = np.asarray(tri1)
tri2 = np.asarray(tri2)
(tri1,tri2) = [
(np.transpose(tri, (1,0) if len(tri.shape) == 2 else (2,0,1)) if tri.shape[0] != 3 else
np.transpose(tri, (0,2,1)) if tri.shape[1] != 3 else
tri)
for tri in (tri1,tri2)]
pt = pt.T if pt.shape[0] != 3 else pt
# get the individual tetrahedron bc coordinates
bcs1 = tetrahedral_barycentric_coordinates([tri1[0], tri1[1], tri1[2], tri2[0]], pt)
bcs2 = tetrahedral_barycentric_coordinates([tri1[1], tri1[2], tri2[0], tri2[1]], pt)
bcs3 = tetrahedral_barycentric_coordinates([tri1[2], tri2[0], tri2[1], tri2[2]], pt)
bcs4 = tetrahedral_barycentric_coordinates([tri1[0], tri1[1], tri2[0], tri2[1]], pt)
bcs5 = tetrahedral_barycentric_coordinates([tri1[0], tri1[2], tri2[0], tri2[2]], pt)
bcs6 = tetrahedral_barycentric_coordinates([tri1[1], tri1[2], tri2[1], tri2[2]], pt)
bcs = ((bcs1[0] + bcs4[0] + bcs5[0],
bcs1[1] + bcs2[0] + bcs4[1] + bcs6[0],
bcs1[2] + bcs2[1] + bcs3[0] + bcs5[1] + bcs6[1]),
(bcs1[3] + bcs2[2] + bcs3[1] + bcs4[2] + bcs5[2],
bcs2[3] + bcs3[2] + bcs4[3] + bcs6[2],
bcs3[3] + bcs5[3] + bcs6[3]))
# convert into (a,b,c,d) coordinates
abc = np.sum(bcs, axis=0)
d = np.sum(bcs[1], axis=0)
return np.asarray((abc[0], abc[1], d)) | python | def prism_barycentric_coordinates(tri1, tri2, pt):
'''
prism_barycentric_coordinates(tri1, tri2, point) yields a list of weights for each vertex
in the given tetrahedron in the same order as the vertices given. If all weights are 0, then
the point is not inside the tetrahedron. The returned weights are (a,b,d) in a numpy array;
the values a, b, and c are the barycentric coordinates corresponding to the three points of
the triangles (where c = (1 - a - b) and the value d is the fractional distance (in the range
[0,1]) of the point between tri1 (d=0) and tri2 (d=1).
'''
pt = np.asarray(pt)
tri1 = np.asarray(tri1)
tri2 = np.asarray(tri2)
(tri1,tri2) = [
(np.transpose(tri, (1,0) if len(tri.shape) == 2 else (2,0,1)) if tri.shape[0] != 3 else
np.transpose(tri, (0,2,1)) if tri.shape[1] != 3 else
tri)
for tri in (tri1,tri2)]
pt = pt.T if pt.shape[0] != 3 else pt
# get the individual tetrahedron bc coordinates
bcs1 = tetrahedral_barycentric_coordinates([tri1[0], tri1[1], tri1[2], tri2[0]], pt)
bcs2 = tetrahedral_barycentric_coordinates([tri1[1], tri1[2], tri2[0], tri2[1]], pt)
bcs3 = tetrahedral_barycentric_coordinates([tri1[2], tri2[0], tri2[1], tri2[2]], pt)
bcs4 = tetrahedral_barycentric_coordinates([tri1[0], tri1[1], tri2[0], tri2[1]], pt)
bcs5 = tetrahedral_barycentric_coordinates([tri1[0], tri1[2], tri2[0], tri2[2]], pt)
bcs6 = tetrahedral_barycentric_coordinates([tri1[1], tri1[2], tri2[1], tri2[2]], pt)
bcs = ((bcs1[0] + bcs4[0] + bcs5[0],
bcs1[1] + bcs2[0] + bcs4[1] + bcs6[0],
bcs1[2] + bcs2[1] + bcs3[0] + bcs5[1] + bcs6[1]),
(bcs1[3] + bcs2[2] + bcs3[1] + bcs4[2] + bcs5[2],
bcs2[3] + bcs3[2] + bcs4[3] + bcs6[2],
bcs3[3] + bcs5[3] + bcs6[3]))
# convert into (a,b,c,d) coordinates
abc = np.sum(bcs, axis=0)
d = np.sum(bcs[1], axis=0)
return np.asarray((abc[0], abc[1], d)) | ['def', 'prism_barycentric_coordinates', '(', 'tri1', ',', 'tri2', ',', 'pt', ')', ':', 'pt', '=', 'np', '.', 'asarray', '(', 'pt', ')', 'tri1', '=', 'np', '.', 'asarray', '(', 'tri1', ')', 'tri2', '=', 'np', '.', 'asarray', '(', 'tri2', ')', '(', 'tri1', ',', 'tri2', ')', '=', '[', '(', 'np', '.', 'transpose', '(', 'tri', ',', '(', '1', ',', '0', ')', 'if', 'len', '(', 'tri', '.', 'shape', ')', '==', '2', 'else', '(', '2', ',', '0', ',', '1', ')', ')', 'if', 'tri', '.', 'shape', '[', '0', ']', '!=', '3', 'else', 'np', '.', 'transpose', '(', 'tri', ',', '(', '0', ',', '2', ',', '1', ')', ')', 'if', 'tri', '.', 'shape', '[', '1', ']', '!=', '3', 'else', 'tri', ')', 'for', 'tri', 'in', '(', 'tri1', ',', 'tri2', ')', ']', 'pt', '=', 'pt', '.', 'T', 'if', 'pt', '.', 'shape', '[', '0', ']', '!=', '3', 'else', 'pt', '# get the individual tetrahedron bc coordinates', 'bcs1', '=', 'tetrahedral_barycentric_coordinates', '(', '[', 'tri1', '[', '0', ']', ',', 'tri1', '[', '1', ']', ',', 'tri1', '[', '2', ']', ',', 'tri2', '[', '0', ']', ']', ',', 'pt', ')', 'bcs2', '=', 'tetrahedral_barycentric_coordinates', '(', '[', 'tri1', '[', '1', ']', ',', 'tri1', '[', '2', ']', ',', 'tri2', '[', '0', ']', ',', 'tri2', '[', '1', ']', ']', ',', 'pt', ')', 'bcs3', '=', 'tetrahedral_barycentric_coordinates', '(', '[', 'tri1', '[', '2', ']', ',', 'tri2', '[', '0', ']', ',', 'tri2', '[', '1', ']', ',', 'tri2', '[', '2', ']', ']', ',', 'pt', ')', 'bcs4', '=', 'tetrahedral_barycentric_coordinates', '(', '[', 'tri1', '[', '0', ']', ',', 'tri1', '[', '1', ']', ',', 'tri2', '[', '0', ']', ',', 'tri2', '[', '1', ']', ']', ',', 'pt', ')', 'bcs5', '=', 'tetrahedral_barycentric_coordinates', '(', '[', 'tri1', '[', '0', ']', ',', 'tri1', '[', '2', ']', ',', 'tri2', '[', '0', ']', ',', 'tri2', '[', '2', ']', ']', ',', 'pt', ')', 'bcs6', '=', 'tetrahedral_barycentric_coordinates', '(', '[', 'tri1', '[', '1', ']', ',', 'tri1', '[', '2', ']', ',', 'tri2', '[', '1', ']', ',', 'tri2', '[', '2', ']', ']', ',', 'pt', ')', 'bcs', '=', '(', '(', 'bcs1', '[', '0', ']', '+', 'bcs4', '[', '0', ']', '+', 'bcs5', '[', '0', ']', ',', 'bcs1', '[', '1', ']', '+', 'bcs2', '[', '0', ']', '+', 'bcs4', '[', '1', ']', '+', 'bcs6', '[', '0', ']', ',', 'bcs1', '[', '2', ']', '+', 'bcs2', '[', '1', ']', '+', 'bcs3', '[', '0', ']', '+', 'bcs5', '[', '1', ']', '+', 'bcs6', '[', '1', ']', ')', ',', '(', 'bcs1', '[', '3', ']', '+', 'bcs2', '[', '2', ']', '+', 'bcs3', '[', '1', ']', '+', 'bcs4', '[', '2', ']', '+', 'bcs5', '[', '2', ']', ',', 'bcs2', '[', '3', ']', '+', 'bcs3', '[', '2', ']', '+', 'bcs4', '[', '3', ']', '+', 'bcs6', '[', '2', ']', ',', 'bcs3', '[', '3', ']', '+', 'bcs5', '[', '3', ']', '+', 'bcs6', '[', '3', ']', ')', ')', '# convert into (a,b,c,d) coordinates', 'abc', '=', 'np', '.', 'sum', '(', 'bcs', ',', 'axis', '=', '0', ')', 'd', '=', 'np', '.', 'sum', '(', 'bcs', '[', '1', ']', ',', 'axis', '=', '0', ')', 'return', 'np', '.', 'asarray', '(', '(', 'abc', '[', '0', ']', ',', 'abc', '[', '1', ']', ',', 'd', ')', ')'] | prism_barycentric_coordinates(tri1, tri2, point) yields a list of weights for each vertex
in the given tetrahedron in the same order as the vertices given. If all weights are 0, then
the point is not inside the tetrahedron. The returned weights are (a,b,d) in a numpy array;
the values a, b, and c are the barycentric coordinates corresponding to the three points of
the triangles (where c = (1 - a - b) and the value d is the fractional distance (in the range
[0,1]) of the point between tri1 (d=0) and tri2 (d=1). | ['prism_barycentric_coordinates', '(', 'tri1', 'tri2', 'point', ')', 'yields', 'a', 'list', 'of', 'weights', 'for', 'each', 'vertex', 'in', 'the', 'given', 'tetrahedron', 'in', 'the', 'same', 'order', 'as', 'the', 'vertices', 'given', '.', 'If', 'all', 'weights', 'are', '0', 'then', 'the', 'point', 'is', 'not', 'inside', 'the', 'tetrahedron', '.', 'The', 'returned', 'weights', 'are', '(', 'a', 'b', 'd', ')', 'in', 'a', 'numpy', 'array', ';', 'the', 'values', 'a', 'b', 'and', 'c', 'are', 'the', 'barycentric', 'coordinates', 'corresponding', 'to', 'the', 'three', 'points', 'of', 'the', 'triangles', '(', 'where', 'c', '=', '(', '1', '-', 'a', '-', 'b', ')', 'and', 'the', 'value', 'd', 'is', 'the', 'fractional', 'distance', '(', 'in', 'the', 'range', '[', '0', '1', ']', ')', 'of', 'the', 'point', 'between', 'tri1', '(', 'd', '=', '0', ')', 'and', 'tri2', '(', 'd', '=', '1', ')', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/util.py#L693-L727 |
8,684 | hozn/stravalib | stravalib/client.py | Client.get_gear | def get_gear(self, gear_id):
"""
Get details for an item of gear.
http://strava.github.io/api/v3/gear/#show
:param gear_id: The gear id.
:type gear_id: str
:return: The Bike or Shoe subclass object.
:rtype: :class:`stravalib.model.Gear`
"""
return model.Gear.deserialize(self.protocol.get('/gear/{id}', id=gear_id)) | python | def get_gear(self, gear_id):
"""
Get details for an item of gear.
http://strava.github.io/api/v3/gear/#show
:param gear_id: The gear id.
:type gear_id: str
:return: The Bike or Shoe subclass object.
:rtype: :class:`stravalib.model.Gear`
"""
return model.Gear.deserialize(self.protocol.get('/gear/{id}', id=gear_id)) | ['def', 'get_gear', '(', 'self', ',', 'gear_id', ')', ':', 'return', 'model', '.', 'Gear', '.', 'deserialize', '(', 'self', '.', 'protocol', '.', 'get', '(', "'/gear/{id}'", ',', 'id', '=', 'gear_id', ')', ')'] | Get details for an item of gear.
http://strava.github.io/api/v3/gear/#show
:param gear_id: The gear id.
:type gear_id: str
:return: The Bike or Shoe subclass object.
:rtype: :class:`stravalib.model.Gear` | ['Get', 'details', 'for', 'an', 'item', 'of', 'gear', '.'] | train | https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/client.py#L871-L883 |
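A minimal usage sketch for the get_gear method in the record above; the access token and the gear id 'b1234567' are placeholders, and the .name attribute is assumed from the Gear model rather than shown in this record.
    from stravalib.client import Client
    client = Client(access_token='<your-strava-token>')   # placeholder token
    gear = client.get_gear('b1234567')                    # hypothetical gear id
    print(gear.name)                                      # Bike or Shoe subclass instance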
8,685 | atlassian-api/atlassian-python-api | atlassian/confluence.py | Confluence.delete_page_property | def delete_page_property(self, page_id, page_property):
"""
Delete the page (content) property e.g. delete key of hash
:param page_id: content_id format
:param page_property: key of property
:return:
"""
url = 'rest/api/content/{page_id}/property/{page_property}'.format(page_id=page_id,
page_property=str(page_property))
return self.delete(path=url) | python | def delete_page_property(self, page_id, page_property):
"""
Delete the page (content) property e.g. delete key of hash
:param page_id: content_id format
:param page_property: key of property
:return:
"""
url = 'rest/api/content/{page_id}/property/{page_property}'.format(page_id=page_id,
page_property=str(page_property))
return self.delete(path=url) | ['def', 'delete_page_property', '(', 'self', ',', 'page_id', ',', 'page_property', ')', ':', 'url', '=', "'rest/api/content/{page_id}/property/{page_property}'", '.', 'format', '(', 'page_id', '=', 'page_id', ',', 'page_property', '=', 'str', '(', 'page_property', ')', ')', 'return', 'self', '.', 'delete', '(', 'path', '=', 'url', ')'] | Delete the page (content) property e.g. delete key of hash
:param page_id: content_id format
:param page_property: key of property
:return: | ['Delete', 'the', 'page', '(', 'content', ')', 'property', 'e', '.', 'g', '.', 'delete', 'key', 'of', 'hash', ':', 'param', 'page_id', ':', 'content_id', 'format', ':', 'param', 'page_property', ':', 'key', 'of', 'property', ':', 'return', ':'] | train | https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/confluence.py#L540-L549 |
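A short usage sketch for delete_page_property; the Confluence URL, credentials, page id and property key below are placeholders, not values from this record.
    from atlassian import Confluence
    confluence = Confluence(url='https://wiki.example.com',
                            username='user', password='secret')   # placeholder credentials
    confluence.delete_page_property('123456789', 'my_property')   # hypothetical page id / property key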
8,686 | Workiva/furious | furious/async.py | AsyncResult._payload_to_dict | def _payload_to_dict(self):
"""When an error status the payload is holding an AsyncException that
is converted to a serializable dict.
"""
if self.status != self.ERROR or not self.payload:
return self.payload
import traceback
return {
"error": self.payload.error,
"args": self.payload.args,
"traceback": traceback.format_exception(*self.payload.traceback)
} | python | def _payload_to_dict(self):
"""When an error status the payload is holding an AsyncException that
is converted to a serializable dict.
"""
if self.status != self.ERROR or not self.payload:
return self.payload
import traceback
return {
"error": self.payload.error,
"args": self.payload.args,
"traceback": traceback.format_exception(*self.payload.traceback)
} | ['def', '_payload_to_dict', '(', 'self', ')', ':', 'if', 'self', '.', 'status', '!=', 'self', '.', 'ERROR', 'or', 'not', 'self', '.', 'payload', ':', 'return', 'self', '.', 'payload', 'import', 'traceback', 'return', '{', '"error"', ':', 'self', '.', 'payload', '.', 'error', ',', '"args"', ':', 'self', '.', 'payload', '.', 'args', ',', '"traceback"', ':', 'traceback', '.', 'format_exception', '(', '*', 'self', '.', 'payload', '.', 'traceback', ')', '}'] | When an error status the payload is holding an AsyncException that
is converted to a serializable dict. | ['When', 'an', 'error', 'status', 'the', 'payload', 'is', 'holding', 'an', 'AsyncException', 'that', 'is', 'converted', 'to', 'a', 'serializable', 'dict', '.'] | train | https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L574-L587 |
8,687 | bitesofcode/projexui | projexui/menus/xrecentfilesmenu.py | XRecentFilesMenu.refresh | def refresh( self ):
"""
Clears out the actions for this menu and then loads the files.
"""
self.clear()
for i, filename in enumerate(self.filenames()):
name = '%i. %s' % (i+1, os.path.basename(filename))
action = self.addAction(name)
action.setData(wrapVariant(filename)) | python | def refresh( self ):
"""
Clears out the actions for this menu and then loads the files.
"""
self.clear()
for i, filename in enumerate(self.filenames()):
name = '%i. %s' % (i+1, os.path.basename(filename))
action = self.addAction(name)
action.setData(wrapVariant(filename)) | ['def', 'refresh', '(', 'self', ')', ':', 'self', '.', 'clear', '(', ')', 'for', 'i', ',', 'filename', 'in', 'enumerate', '(', 'self', '.', 'filenames', '(', ')', ')', ':', 'name', '=', "'%i. %s'", '%', '(', 'i', '+', '1', ',', 'os', '.', 'path', '.', 'basename', '(', 'filename', ')', ')', 'action', '=', 'self', '.', 'addAction', '(', 'name', ')', 'action', '.', 'setData', '(', 'wrapVariant', '(', 'filename', ')', ')'] | Clears out the actions for this menu and then loads the files. | ['Clears', 'out', 'the', 'actions', 'for', 'this', 'menu', 'and', 'then', 'loads', 'the', 'files', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/menus/xrecentfilesmenu.py#L84-L93 |
8,688 | jwkvam/bowtie | bowtie/_component.py | json_conversion | def json_conversion(obj: Any) -> JSON:
"""Encode additional objects to JSON."""
try:
# numpy isn't an explicit dependency of bowtie
# so we can't assume it's available
import numpy as np
if isinstance(obj, (np.ndarray, np.generic)):
return obj.tolist()
except ImportError:
pass
try:
# pandas isn't an explicit dependency of bowtie
# so we can't assume it's available
import pandas as pd
if isinstance(obj, pd.DatetimeIndex):
return [x.isoformat() for x in obj.to_pydatetime()]
if isinstance(obj, pd.Index):
return obj.tolist()
if isinstance(obj, pd.Series):
try:
return [x.isoformat() for x in obj.dt.to_pydatetime()]
except AttributeError:
return obj.tolist()
except ImportError:
pass
if isinstance(obj, (datetime, time, date)):
return obj.isoformat()
raise TypeError('Not sure how to serialize {} of type {}'.format(obj, type(obj))) | python | def json_conversion(obj: Any) -> JSON:
"""Encode additional objects to JSON."""
try:
# numpy isn't an explicit dependency of bowtie
# so we can't assume it's available
import numpy as np
if isinstance(obj, (np.ndarray, np.generic)):
return obj.tolist()
except ImportError:
pass
try:
# pandas isn't an explicit dependency of bowtie
# so we can't assume it's available
import pandas as pd
if isinstance(obj, pd.DatetimeIndex):
return [x.isoformat() for x in obj.to_pydatetime()]
if isinstance(obj, pd.Index):
return obj.tolist()
if isinstance(obj, pd.Series):
try:
return [x.isoformat() for x in obj.dt.to_pydatetime()]
except AttributeError:
return obj.tolist()
except ImportError:
pass
if isinstance(obj, (datetime, time, date)):
return obj.isoformat()
raise TypeError('Not sure how to serialize {} of type {}'.format(obj, type(obj))) | ['def', 'json_conversion', '(', 'obj', ':', 'Any', ')', '->', 'JSON', ':', 'try', ':', "# numpy isn't an explicit dependency of bowtie", "# so we can't assume it's available", 'import', 'numpy', 'as', 'np', 'if', 'isinstance', '(', 'obj', ',', '(', 'np', '.', 'ndarray', ',', 'np', '.', 'generic', ')', ')', ':', 'return', 'obj', '.', 'tolist', '(', ')', 'except', 'ImportError', ':', 'pass', 'try', ':', "# pandas isn't an explicit dependency of bowtie", "# so we can't assume it's available", 'import', 'pandas', 'as', 'pd', 'if', 'isinstance', '(', 'obj', ',', 'pd', '.', 'DatetimeIndex', ')', ':', 'return', '[', 'x', '.', 'isoformat', '(', ')', 'for', 'x', 'in', 'obj', '.', 'to_pydatetime', '(', ')', ']', 'if', 'isinstance', '(', 'obj', ',', 'pd', '.', 'Index', ')', ':', 'return', 'obj', '.', 'tolist', '(', ')', 'if', 'isinstance', '(', 'obj', ',', 'pd', '.', 'Series', ')', ':', 'try', ':', 'return', '[', 'x', '.', 'isoformat', '(', ')', 'for', 'x', 'in', 'obj', '.', 'dt', '.', 'to_pydatetime', '(', ')', ']', 'except', 'AttributeError', ':', 'return', 'obj', '.', 'tolist', '(', ')', 'except', 'ImportError', ':', 'pass', 'if', 'isinstance', '(', 'obj', ',', '(', 'datetime', ',', 'time', ',', 'date', ')', ')', ':', 'return', 'obj', '.', 'isoformat', '(', ')', 'raise', 'TypeError', '(', "'Not sure how to serialize {} of type {}'", '.', 'format', '(', 'obj', ',', 'type', '(', 'obj', ')', ')', ')'] | Encode additional objects to JSON. | ['Encode', 'additional', 'objects', 'to', 'JSON', '.'] | train | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_component.py#L79-L108 |
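json_conversion is written as a default= hook for the standard json encoder (it raises TypeError for anything it cannot handle); a small sketch of that presumed usage:
    import json
    from datetime import datetime
    payload = json.dumps({'when': datetime.now()}, default=json_conversion)  # datetimes become ISO strings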
8,689 | LPgenerator/django-db-mailer | dbmail/providers/twilio/sms.py | send | def send(sms_to, sms_body, **kwargs):
"""
Site: https://www.twilio.com/
API: https://www.twilio.com/docs/api/rest/sending-messages
"""
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "DBMail/%s" % get_version(),
'Authorization': 'Basic %s' % b64encode(
"%s:%s" % (
settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN
)).decode("ascii")
}
kwargs.update({
'From': kwargs.pop('sms_from', settings.TWILIO_FROM),
'To': sms_to,
'Body': from_unicode(sms_body)
})
http = HTTPSConnection(kwargs.pop("api_url", "api.twilio.com"))
http.request(
"POST",
"/2010-04-01/Accounts/%s/Messages.json" % settings.TWILIO_ACCOUNT_SID,
headers=headers,
body=urlencode(kwargs))
response = http.getresponse()
if response.status != 201:
raise TwilioSmsError(response.reason)
return loads(response.read()).get('sid') | python | def send(sms_to, sms_body, **kwargs):
"""
Site: https://www.twilio.com/
API: https://www.twilio.com/docs/api/rest/sending-messages
"""
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "DBMail/%s" % get_version(),
'Authorization': 'Basic %s' % b64encode(
"%s:%s" % (
settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN
)).decode("ascii")
}
kwargs.update({
'From': kwargs.pop('sms_from', settings.TWILIO_FROM),
'To': sms_to,
'Body': from_unicode(sms_body)
})
http = HTTPSConnection(kwargs.pop("api_url", "api.twilio.com"))
http.request(
"POST",
"/2010-04-01/Accounts/%s/Messages.json" % settings.TWILIO_ACCOUNT_SID,
headers=headers,
body=urlencode(kwargs))
response = http.getresponse()
if response.status != 201:
raise TwilioSmsError(response.reason)
return loads(response.read()).get('sid') | ['def', 'send', '(', 'sms_to', ',', 'sms_body', ',', '*', '*', 'kwargs', ')', ':', 'headers', '=', '{', '"Content-type"', ':', '"application/x-www-form-urlencoded"', ',', '"User-Agent"', ':', '"DBMail/%s"', '%', 'get_version', '(', ')', ',', "'Authorization'", ':', "'Basic %s'", '%', 'b64encode', '(', '"%s:%s"', '%', '(', 'settings', '.', 'TWILIO_ACCOUNT_SID', ',', 'settings', '.', 'TWILIO_AUTH_TOKEN', ')', ')', '.', 'decode', '(', '"ascii"', ')', '}', 'kwargs', '.', 'update', '(', '{', "'From'", ':', 'kwargs', '.', 'pop', '(', "'sms_from'", ',', 'settings', '.', 'TWILIO_FROM', ')', ',', "'To'", ':', 'sms_to', ',', "'Body'", ':', 'from_unicode', '(', 'sms_body', ')', '}', ')', 'http', '=', 'HTTPSConnection', '(', 'kwargs', '.', 'pop', '(', '"api_url"', ',', '"api.twilio.com"', ')', ')', 'http', '.', 'request', '(', '"POST"', ',', '"/2010-04-01/Accounts/%s/Messages.json"', '%', 'settings', '.', 'TWILIO_ACCOUNT_SID', ',', 'headers', '=', 'headers', ',', 'body', '=', 'urlencode', '(', 'kwargs', ')', ')', 'response', '=', 'http', '.', 'getresponse', '(', ')', 'if', 'response', '.', 'status', '!=', '201', ':', 'raise', 'TwilioSmsError', '(', 'response', '.', 'reason', ')', 'return', 'loads', '(', 'response', '.', 'read', '(', ')', ')', '.', 'get', '(', "'sid'", ')'] | Site: https://www.twilio.com/
API: https://www.twilio.com/docs/api/rest/sending-messages | ['Site', ':', 'https', ':', '//', 'www', '.', 'twilio', '.', 'com', '/', 'API', ':', 'https', ':', '//', 'www', '.', 'twilio', '.', 'com', '/', 'docs', '/', 'api', '/', 'rest', '/', 'sending', '-', 'messages'] | train | https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/twilio/sms.py#L23-L55 |
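A usage sketch for the Twilio send helper, assuming TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN and TWILIO_FROM are configured in Django settings; the phone number and message are placeholders.
    sid = send('+15551234567', 'Your confirmation code is 1234')  # returns the Twilio message sid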
8,690 | google/apitools | apitools/gen/message_registry.py | MessageRegistry.WriteFile | def WriteFile(self, printer):
"""Write the messages file to out."""
self.Validate()
extended_descriptor.WritePythonFile(
self.__file_descriptor, self.__package, self.__client_info.version,
printer) | python | def WriteFile(self, printer):
"""Write the messages file to out."""
self.Validate()
extended_descriptor.WritePythonFile(
self.__file_descriptor, self.__package, self.__client_info.version,
printer) | ['def', 'WriteFile', '(', 'self', ',', 'printer', ')', ':', 'self', '.', 'Validate', '(', ')', 'extended_descriptor', '.', 'WritePythonFile', '(', 'self', '.', '__file_descriptor', ',', 'self', '.', '__package', ',', 'self', '.', '__client_info', '.', 'version', ',', 'printer', ')'] | Write the messages file to out. | ['Write', 'the', 'messages', 'file', 'to', 'out', '.'] | train | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/message_registry.py#L120-L125 |
8,691 | blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasPeerCrawler.remove_unhealthy_peers | def remove_unhealthy_peers( self, count, con=None, path=None, peer_table=None, min_request_count=10, min_health=MIN_PEER_HEALTH ):
"""
Remove up to @count unhealthy peers
Return the list of peers we removed
"""
if path is None:
path = self.atlasdb_path
removed = []
rank_peer_list = atlas_rank_peers_by_health( peer_table=peer_table, with_rank=True )
for rank, peer in rank_peer_list:
reqcount = atlas_peer_get_request_count( peer, peer_table=peer_table )
if reqcount >= min_request_count and rank < min_health and not atlas_peer_is_whitelisted( peer, peer_table=peer_table ) and not atlas_peer_is_blacklisted( peer, peer_table=peer_table ):
removed.append( peer )
random.shuffle(removed)
if len(removed) > count:
removed = removed[:count]
for peer in removed:
log.debug("Remove unhealthy peer %s" % (peer))
atlasdb_remove_peer( peer, con=con, path=path, peer_table=peer_table )
return removed | python | def remove_unhealthy_peers( self, count, con=None, path=None, peer_table=None, min_request_count=10, min_health=MIN_PEER_HEALTH ):
"""
Remove up to @count unhealthy peers
Return the list of peers we removed
"""
if path is None:
path = self.atlasdb_path
removed = []
rank_peer_list = atlas_rank_peers_by_health( peer_table=peer_table, with_rank=True )
for rank, peer in rank_peer_list:
reqcount = atlas_peer_get_request_count( peer, peer_table=peer_table )
if reqcount >= min_request_count and rank < min_health and not atlas_peer_is_whitelisted( peer, peer_table=peer_table ) and not atlas_peer_is_blacklisted( peer, peer_table=peer_table ):
removed.append( peer )
random.shuffle(removed)
if len(removed) > count:
removed = removed[:count]
for peer in removed:
log.debug("Remove unhealthy peer %s" % (peer))
atlasdb_remove_peer( peer, con=con, path=path, peer_table=peer_table )
return removed | ['def', 'remove_unhealthy_peers', '(', 'self', ',', 'count', ',', 'con', '=', 'None', ',', 'path', '=', 'None', ',', 'peer_table', '=', 'None', ',', 'min_request_count', '=', '10', ',', 'min_health', '=', 'MIN_PEER_HEALTH', ')', ':', 'if', 'path', 'is', 'None', ':', 'path', '=', 'self', '.', 'atlasdb_path', 'removed', '=', '[', ']', 'rank_peer_list', '=', 'atlas_rank_peers_by_health', '(', 'peer_table', '=', 'peer_table', ',', 'with_rank', '=', 'True', ')', 'for', 'rank', ',', 'peer', 'in', 'rank_peer_list', ':', 'reqcount', '=', 'atlas_peer_get_request_count', '(', 'peer', ',', 'peer_table', '=', 'peer_table', ')', 'if', 'reqcount', '>=', 'min_request_count', 'and', 'rank', '<', 'min_health', 'and', 'not', 'atlas_peer_is_whitelisted', '(', 'peer', ',', 'peer_table', '=', 'peer_table', ')', 'and', 'not', 'atlas_peer_is_blacklisted', '(', 'peer', ',', 'peer_table', '=', 'peer_table', ')', ':', 'removed', '.', 'append', '(', 'peer', ')', 'random', '.', 'shuffle', '(', 'removed', ')', 'if', 'len', '(', 'removed', ')', '>', 'count', ':', 'removed', '=', 'removed', '[', ':', 'count', ']', 'for', 'peer', 'in', 'removed', ':', 'log', '.', 'debug', '(', '"Remove unhealthy peer %s"', '%', '(', 'peer', ')', ')', 'atlasdb_remove_peer', '(', 'peer', ',', 'con', '=', 'con', ',', 'path', '=', 'path', ',', 'peer_table', '=', 'peer_table', ')', 'return', 'removed'] | Remove up to @count unhealthy peers
Return the list of peers we removed | ['Remove', 'up', 'to'] | train | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2836-L2860 |
8,692 | spulec/moto | moto/cognitoidp/models.py | paginate | def paginate(limit, start_arg="next_token", limit_arg="max_results"):
"""Returns a limited result list, and an offset into list of remaining items
Takes the next_token, and max_results kwargs given to a function and handles
the slicing of the results. The kwarg `next_token` is the offset into the
list to begin slicing from. `max_results` is the size of the result required
If the max_results is not supplied then the `limit` parameter is used as a
default
:param limit_arg: the name of argument in the decorated function that
controls amount of items returned
:param start_arg: the name of the argument in the decorated that provides
the starting offset
:param limit: A default maximum items to return
:return: a tuple containing a list of items, and the offset into the list
"""
default_start = 0
def outer_wrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg])
lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg])
stop = start + lim
result = func(*args, **kwargs)
limited_results = list(itertools.islice(result, start, stop))
next_token = stop if stop < len(result) else None
return limited_results, next_token
return wrapper
return outer_wrapper | python | def paginate(limit, start_arg="next_token", limit_arg="max_results"):
"""Returns a limited result list, and an offset into list of remaining items
Takes the next_token, and max_results kwargs given to a function and handles
the slicing of the results. The kwarg `next_token` is the offset into the
list to begin slicing from. `max_results` is the size of the result required
If the max_results is not supplied then the `limit` parameter is used as a
default
:param limit_arg: the name of argument in the decorated function that
controls amount of items returned
:param start_arg: the name of the argument in the decorated that provides
the starting offset
:param limit: A default maximum items to return
:return: a tuple containing a list of items, and the offset into the list
"""
default_start = 0
def outer_wrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg])
lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg])
stop = start + lim
result = func(*args, **kwargs)
limited_results = list(itertools.islice(result, start, stop))
next_token = stop if stop < len(result) else None
return limited_results, next_token
return wrapper
return outer_wrapper | ['def', 'paginate', '(', 'limit', ',', 'start_arg', '=', '"next_token"', ',', 'limit_arg', '=', '"max_results"', ')', ':', 'default_start', '=', '0', 'def', 'outer_wrapper', '(', 'func', ')', ':', '@', 'functools', '.', 'wraps', '(', 'func', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'start', '=', 'int', '(', 'default_start', 'if', 'kwargs', '.', 'get', '(', 'start_arg', ')', 'is', 'None', 'else', 'kwargs', '[', 'start_arg', ']', ')', 'lim', '=', 'int', '(', 'limit', 'if', 'kwargs', '.', 'get', '(', 'limit_arg', ')', 'is', 'None', 'else', 'kwargs', '[', 'limit_arg', ']', ')', 'stop', '=', 'start', '+', 'lim', 'result', '=', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'limited_results', '=', 'list', '(', 'itertools', '.', 'islice', '(', 'result', ',', 'start', ',', 'stop', ')', ')', 'next_token', '=', 'stop', 'if', 'stop', '<', 'len', '(', 'result', ')', 'else', 'None', 'return', 'limited_results', ',', 'next_token', 'return', 'wrapper', 'return', 'outer_wrapper'] | Returns a limited result list, and an offset into list of remaining items
Takes the next_token, and max_results kwargs given to a function and handles
the slicing of the results. The kwarg `next_token` is the offset into the
list to begin slicing from. `max_results` is the size of the result required
If the max_results is not supplied then the `limit` parameter is used as a
default
:param limit_arg: the name of argument in the decorated function that
controls amount of items returned
:param start_arg: the name of the argument in the decorated that provides
the starting offset
:param limit: A default maximum items to return
:return: a tuple containing a list of items, and the offset into the list | ['Returns', 'a', 'limited', 'result', 'list', 'and', 'an', 'offset', 'into', 'list', 'of', 'remaining', 'items'] | train | https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/cognitoidp/models.py#L24-L54 |
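A hypothetical sketch of the paginate decorator applied to a backend method; WidgetBackend and list_widgets are illustrative names, not part of moto.
    class WidgetBackend(object):
        def __init__(self):
            self.widgets = {}
        @paginate(limit=25)
        def list_widgets(self, max_results=None, next_token=None):
            return list(self.widgets.values())
    backend = WidgetBackend()
    items, token = backend.list_widgets(max_results=10)  # token is None once everything has been returned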
8,693 | softwarefactory-project/rdopkg | rdopkg/utils/specfile.py | Spec.get_magic_comment | def get_magic_comment(self, name, expand_macros=False):
"""Return a value of # name=value comment in spec or None."""
match = re.search(r'^#\s*?%s\s?=\s?(\S+)' % re.escape(name),
self.txt, flags=re.M)
if not match:
return None
val = match.group(1)
if expand_macros and has_macros(val):
# don't parse using rpm unless required
val = self.expand_macro(val)
return val | python | def get_magic_comment(self, name, expand_macros=False):
"""Return a value of # name=value comment in spec or None."""
match = re.search(r'^#\s*?%s\s?=\s?(\S+)' % re.escape(name),
self.txt, flags=re.M)
if not match:
return None
val = match.group(1)
if expand_macros and has_macros(val):
# don't parse using rpm unless required
val = self.expand_macro(val)
return val | ['def', 'get_magic_comment', '(', 'self', ',', 'name', ',', 'expand_macros', '=', 'False', ')', ':', 'match', '=', 're', '.', 'search', '(', "r'^#\\s*?%s\\s?=\\s?(\\S+)'", '%', 're', '.', 'escape', '(', 'name', ')', ',', 'self', '.', 'txt', ',', 'flags', '=', 're', '.', 'M', ')', 'if', 'not', 'match', ':', 'return', 'None', 'val', '=', 'match', '.', 'group', '(', '1', ')', 'if', 'expand_macros', 'and', 'has_macros', '(', 'val', ')', ':', "# don't parse using rpm unless required", 'val', '=', 'self', '.', 'expand_macro', '(', 'val', ')', 'return', 'val'] | Return a value of # name=value comment in spec or None. | ['Return', 'a', 'value', 'of', '#', 'name', '=', 'value', 'comment', 'in', 'spec', 'or', 'None', '.'] | train | https://github.com/softwarefactory-project/rdopkg/blob/2d2bed4e7cd329558a36d0dd404ec4ac8f9f254c/rdopkg/utils/specfile.py#L272-L283 |
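A usage sketch assuming spec is an already-loaded Spec instance; 'patches_base' is used here only as an example comment name.
    # with '# patches_base=1.2.3' somewhere in the .spec file:
    base = spec.get_magic_comment('patches_base')                      # -> '1.2.3'
    base = spec.get_magic_comment('patches_base', expand_macros=True)  # expands %{...} macros in the value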
8,694 | iqbal-lab-org/cluster_vcf_records | cluster_vcf_records/vcf_clusterer.py | VcfClusterer._expand_alts_in_vcf_record_list | def _expand_alts_in_vcf_record_list(cls, vcf_records):
'''Input: list of vcf_records. Returns new list, where
any records with >ALT is replaced with one vcf record per ALT.
This doesn't change FORMAT or INFO columns, which means they
are now broken for those records'''
new_vcf_records = []
for record in vcf_records:
new_vcf_records.extend(record.to_record_per_alt())
return new_vcf_records | python | def _expand_alts_in_vcf_record_list(cls, vcf_records):
'''Input: list of vcf_records. Returns new list, where
any records with >ALT is replaced with one vcf record per ALT.
This doesn't change FORMAT or INFO columns, which means they
are now broken for those records'''
new_vcf_records = []
for record in vcf_records:
new_vcf_records.extend(record.to_record_per_alt())
return new_vcf_records | ['def', '_expand_alts_in_vcf_record_list', '(', 'cls', ',', 'vcf_records', ')', ':', 'new_vcf_records', '=', '[', ']', 'for', 'record', 'in', 'vcf_records', ':', 'new_vcf_records', '.', 'extend', '(', 'record', '.', 'to_record_per_alt', '(', ')', ')', 'return', 'new_vcf_records'] | Input: list of vcf_records. Returns new list, where
any records with >ALT is replaced with one vcf record per ALT.
This doesn't change FORMAT or INFO columns, which means they
are now broken for those records | ['Input', ':', 'list', 'of', 'vcf_records', '.', 'Returns', 'new', 'list', 'where', 'any', 'records', 'with', '>', 'ALT', 'is', 'replaced', 'with', 'one', 'vcf', 'record', 'per', 'ALT', '.', 'This', 'doesn', 't', 'change', 'FORMAT', 'or', 'INFO', 'columns', 'which', 'means', 'they', 'are', 'now', 'broken', 'for', 'those', 'records'] | train | https://github.com/iqbal-lab-org/cluster_vcf_records/blob/0db26af36b6da97a7361364457d2152dc756055c/cluster_vcf_records/vcf_clusterer.py#L115-L123 |
8,695 | hydraplatform/hydra-base | hydra_base/util/permissions.py | required_role | def required_role(req_role):
"""
Decorator applied to functions requiring caller to possess the specified role
"""
def dec_wrapper(wfunc):
@wraps(wfunc)
def wrapped(*args, **kwargs):
user_id = kwargs.get("user_id")
try:
res = db.DBSession.query(RoleUser).filter(RoleUser.user_id==user_id).join(Role, Role.code==req_role).one()
except NoResultFound:
raise PermissionError("Permission denied. User %s does not have role %s"%
(user_id, req_role))
return wfunc(*args, **kwargs)
return wrapped
return dec_wrapper | python | def required_role(req_role):
"""
Decorator applied to functions requiring caller to possess the specified role
"""
def dec_wrapper(wfunc):
@wraps(wfunc)
def wrapped(*args, **kwargs):
user_id = kwargs.get("user_id")
try:
res = db.DBSession.query(RoleUser).filter(RoleUser.user_id==user_id).join(Role, Role.code==req_role).one()
except NoResultFound:
raise PermissionError("Permission denied. User %s does not have role %s"%
(user_id, req_role))
return wfunc(*args, **kwargs)
return wrapped
return dec_wrapper | ['def', 'required_role', '(', 'req_role', ')', ':', 'def', 'dec_wrapper', '(', 'wfunc', ')', ':', '@', 'wraps', '(', 'wfunc', ')', 'def', 'wrapped', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'user_id', '=', 'kwargs', '.', 'get', '(', '"user_id"', ')', 'try', ':', 'res', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'RoleUser', ')', '.', 'filter', '(', 'RoleUser', '.', 'user_id', '==', 'user_id', ')', '.', 'join', '(', 'Role', ',', 'Role', '.', 'code', '==', 'req_role', ')', '.', 'one', '(', ')', 'except', 'NoResultFound', ':', 'raise', 'PermissionError', '(', '"Permission denied. User %s does not have role %s"', '%', '(', 'user_id', ',', 'req_role', ')', ')', 'return', 'wfunc', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'wrapped', 'return', 'dec_wrapper'] | Decorator applied to functions requiring caller to possess the specified role | ['Decorator', 'applied', 'to', 'functions', 'requiring', 'caller', 'to', 'possess', 'the', 'specified', 'role'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/permissions.py#L70-L87 |
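A sketch of how the decorator above would guard a service function; the 'ADMIN' role code and the function itself are hypothetical.
    @required_role('ADMIN')
    def purge_all_networks(project_id, **kwargs):
        # only reached when kwargs['user_id'] belongs to a user holding the ADMIN role
        ...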
8,696 | Demonware/jose | jose.py | legacy_decrypt | def legacy_decrypt(jwe, jwk, adata='', validate_claims=True,
expiry_seconds=None):
""" Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
"""
protected_header, encrypted_key, iv, ciphertext, authentication_tag = map(
b64decode_url, jwe)
header = json_decode(protected_header)
alg = header[HEADER_ALG]
enc = header[HEADER_ENC]
# decrypt cek
encryption_key = _decrypt_key(encrypted_key, jwk, alg)
# decrypt body
((_, decipher), _), ((hash_fn, _), mod) = JWA[enc]
version = header.get(_TEMP_VER_KEY)
if version:
plaintext = decipher(ciphertext, encryption_key[-mod.digest_size/2:],
iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[:-mod.digest_size/2], mod=mod)
else:
plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[-mod.digest_size:], mod=mod)
if not const_compare(auth_tag(hash), authentication_tag):
raise Error('Mismatched authentication tags')
if HEADER_ZIP in header:
try:
(_, decompress) = COMPRESSION[header[HEADER_ZIP]]
except KeyError:
raise Error('Unsupported compression algorithm: {}'.format(
header[HEADER_ZIP]))
plaintext = decompress(plaintext)
claims = json_decode(plaintext)
try:
del claims[_TEMP_VER_KEY]
except KeyError:
# expected when decrypting legacy tokens
pass
_validate(claims, validate_claims, expiry_seconds)
return JWT(header, claims) | python | def legacy_decrypt(jwe, jwk, adata='', validate_claims=True,
expiry_seconds=None):
""" Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
"""
protected_header, encrypted_key, iv, ciphertext, authentication_tag = map(
b64decode_url, jwe)
header = json_decode(protected_header)
alg = header[HEADER_ALG]
enc = header[HEADER_ENC]
# decrypt cek
encryption_key = _decrypt_key(encrypted_key, jwk, alg)
# decrypt body
((_, decipher), _), ((hash_fn, _), mod) = JWA[enc]
version = header.get(_TEMP_VER_KEY)
if version:
plaintext = decipher(ciphertext, encryption_key[-mod.digest_size/2:],
iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[:-mod.digest_size/2], mod=mod)
else:
plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[-mod.digest_size:], mod=mod)
if not const_compare(auth_tag(hash), authentication_tag):
raise Error('Mismatched authentication tags')
if HEADER_ZIP in header:
try:
(_, decompress) = COMPRESSION[header[HEADER_ZIP]]
except KeyError:
raise Error('Unsupported compression algorithm: {}'.format(
header[HEADER_ZIP]))
plaintext = decompress(plaintext)
claims = json_decode(plaintext)
try:
del claims[_TEMP_VER_KEY]
except KeyError:
# expected when decrypting legacy tokens
pass
_validate(claims, validate_claims, expiry_seconds)
return JWT(header, claims) | ['def', 'legacy_decrypt', '(', 'jwe', ',', 'jwk', ',', 'adata', '=', "''", ',', 'validate_claims', '=', 'True', ',', 'expiry_seconds', '=', 'None', ')', ':', 'protected_header', ',', 'encrypted_key', ',', 'iv', ',', 'ciphertext', ',', 'authentication_tag', '=', 'map', '(', 'b64decode_url', ',', 'jwe', ')', 'header', '=', 'json_decode', '(', 'protected_header', ')', 'alg', '=', 'header', '[', 'HEADER_ALG', ']', 'enc', '=', 'header', '[', 'HEADER_ENC', ']', '# decrypt cek', 'encryption_key', '=', '_decrypt_key', '(', 'encrypted_key', ',', 'jwk', ',', 'alg', ')', '# decrypt body', '(', '(', '_', ',', 'decipher', ')', ',', '_', ')', ',', '(', '(', 'hash_fn', ',', '_', ')', ',', 'mod', ')', '=', 'JWA', '[', 'enc', ']', 'version', '=', 'header', '.', 'get', '(', '_TEMP_VER_KEY', ')', 'if', 'version', ':', 'plaintext', '=', 'decipher', '(', 'ciphertext', ',', 'encryption_key', '[', '-', 'mod', '.', 'digest_size', '/', '2', ':', ']', ',', 'iv', ')', 'hash', '=', 'hash_fn', '(', '_jwe_hash_str', '(', 'ciphertext', ',', 'iv', ',', 'adata', ',', 'version', ')', ',', 'encryption_key', '[', ':', '-', 'mod', '.', 'digest_size', '/', '2', ']', ',', 'mod', '=', 'mod', ')', 'else', ':', 'plaintext', '=', 'decipher', '(', 'ciphertext', ',', 'encryption_key', '[', ':', '-', 'mod', '.', 'digest_size', ']', ',', 'iv', ')', 'hash', '=', 'hash_fn', '(', '_jwe_hash_str', '(', 'ciphertext', ',', 'iv', ',', 'adata', ',', 'version', ')', ',', 'encryption_key', '[', '-', 'mod', '.', 'digest_size', ':', ']', ',', 'mod', '=', 'mod', ')', 'if', 'not', 'const_compare', '(', 'auth_tag', '(', 'hash', ')', ',', 'authentication_tag', ')', ':', 'raise', 'Error', '(', "'Mismatched authentication tags'", ')', 'if', 'HEADER_ZIP', 'in', 'header', ':', 'try', ':', '(', '_', ',', 'decompress', ')', '=', 'COMPRESSION', '[', 'header', '[', 'HEADER_ZIP', ']', ']', 'except', 'KeyError', ':', 'raise', 'Error', '(', "'Unsupported compression algorithm: {}'", '.', 'format', '(', 'header', '[', 'HEADER_ZIP', ']', ')', ')', 'plaintext', '=', 'decompress', '(', 'plaintext', ')', 'claims', '=', 'json_decode', '(', 'plaintext', ')', 'try', ':', 'del', 'claims', '[', '_TEMP_VER_KEY', ']', 'except', 'KeyError', ':', '# expected when decrypting legacy tokens', 'pass', '_validate', '(', 'claims', ',', 'validate_claims', ',', 'expiry_seconds', ')', 'return', 'JWT', '(', 'header', ',', 'claims', ')'] | Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE | ['Decrypts', 'a', 'deserialized', ':', 'class', ':', '~jose', '.', 'JWE'] | train | https://github.com/Demonware/jose/blob/5835ec9c9fcab17eddea3c3169881ec12df552d4/jose.py#L315-L380 |
8,697 | saltstack/salt | salt/modules/iosconfig.py | merge_text | def merge_text(initial_config=None,
initial_path=None,
merge_config=None,
merge_path=None,
saltenv='base'):
'''
Return the merge result of the ``initial_config`` with the ``merge_config``,
as plain text.
initial_config
The initial configuration sent as text. This argument is ignored when
``initial_path`` is set.
initial_path
Absolute or remote path from where to load the initial configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
merge_config
The config to be merged into the initial config, sent as text. This
argument is ignored when ``merge_path`` is set.
merge_path
Absolute or remote path from where to load the merge configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``initial_path`` or ``merge_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' iosconfig.merge_text initial_path=salt://path/to/running.cfg merge_path=salt://path/to/merge.cfg
'''
candidate_tree = merge_tree(initial_config=initial_config,
initial_path=initial_path,
merge_config=merge_config,
merge_path=merge_path,
saltenv=saltenv)
return _print_config_text(candidate_tree) | python | def merge_text(initial_config=None,
initial_path=None,
merge_config=None,
merge_path=None,
saltenv='base'):
'''
Return the merge result of the ``initial_config`` with the ``merge_config``,
as plain text.
initial_config
The initial configuration sent as text. This argument is ignored when
``initial_path`` is set.
initial_path
Absolute or remote path from where to load the initial configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
merge_config
The config to be merged into the initial config, sent as text. This
argument is ignored when ``merge_path`` is set.
merge_path
Absolute or remote path from where to load the merge configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``initial_path`` or ``merge_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' iosconfig.merge_text initial_path=salt://path/to/running.cfg merge_path=salt://path/to/merge.cfg
'''
candidate_tree = merge_tree(initial_config=initial_config,
initial_path=initial_path,
merge_config=merge_config,
merge_path=merge_path,
saltenv=saltenv)
return _print_config_text(candidate_tree) | ['def', 'merge_text', '(', 'initial_config', '=', 'None', ',', 'initial_path', '=', 'None', ',', 'merge_config', '=', 'None', ',', 'merge_path', '=', 'None', ',', 'saltenv', '=', "'base'", ')', ':', 'candidate_tree', '=', 'merge_tree', '(', 'initial_config', '=', 'initial_config', ',', 'initial_path', '=', 'initial_path', ',', 'merge_config', '=', 'merge_config', ',', 'merge_path', '=', 'merge_path', ',', 'saltenv', '=', 'saltenv', ')', 'return', '_print_config_text', '(', 'candidate_tree', ')'] | Return the merge result of the ``initial_config`` with the ``merge_config``,
as plain text.
initial_config
The initial configuration sent as text. This argument is ignored when
``initial_path`` is set.
initial_path
Absolute or remote path from where to load the initial configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
merge_config
The config to be merged into the initial config, sent as text. This
argument is ignored when ``merge_path`` is set.
merge_path
Absolute or remote path from where to load the merge configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``initial_path`` or ``merge_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' iosconfig.merge_text initial_path=salt://path/to/running.cfg merge_path=salt://path/to/merge.cfg | ['Return', 'the', 'merge', 'result', 'of', 'the', 'initial_config', 'with', 'the', 'merge_config', 'as', 'plain', 'text', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/iosconfig.py#L261-L305 |
8,698 | libtcod/python-tcod | tdl/__init__.py | _format_char | def _format_char(char):
"""Prepares a single character for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current character
instead of overwriting it.
This is called often and needs to be optimized whenever possible.
"""
if char is None:
return -1
if isinstance(char, _STRTYPES) and len(char) == 1:
return ord(char)
try:
return int(char) # allow all int-like objects
except:
raise TypeError('char single character string, integer, or None\nReceived: ' + repr(char)) | python | def _format_char(char):
"""Prepares a single character for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current character
instead of overwriting it.
This is called often and needs to be optimized whenever possible.
"""
if char is None:
return -1
if isinstance(char, _STRTYPES) and len(char) == 1:
return ord(char)
try:
return int(char) # allow all int-like objects
except:
raise TypeError('char single character string, integer, or None\nReceived: ' + repr(char)) | ['def', '_format_char', '(', 'char', ')', ':', 'if', 'char', 'is', 'None', ':', 'return', '-', '1', 'if', 'isinstance', '(', 'char', ',', '_STRTYPES', ')', 'and', 'len', '(', 'char', ')', '==', '1', ':', 'return', 'ord', '(', 'char', ')', 'try', ':', 'return', 'int', '(', 'char', ')', '# allow all int-like objects', 'except', ':', 'raise', 'TypeError', '(', "'char single character string, integer, or None\\nReceived: '", '+', 'repr', '(', 'char', ')', ')'] | Prepares a single character for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current character
instead of overwriting it.
This is called often and needs to be optimized whenever possible. | ['Prepares', 'a', 'single', 'character', 'for', 'passing', 'to', 'ctypes', 'calls', 'needs', 'to', 'return', 'an', 'integer', 'but', 'can', 'also', 'pass', 'None', 'which', 'will', 'keep', 'the', 'current', 'character', 'instead', 'of', 'overwriting', 'it', '.'] | train | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tdl/__init__.py#L101-L115 |
8,699 | boriel/zxbasic | zxbparser.py | p_bexpr_func | def p_bexpr_func(p):
""" bexpr : ID bexpr
"""
args = make_arg_list(make_argument(p[2], p.lineno(2)))
p[0] = make_call(p[1], p.lineno(1), args)
if p[0] is None:
return
if p[0].token in ('STRSLICE', 'VAR', 'STRING'):
entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1))
entry.accessed = True
return
# TODO: Check that arrays really needs kind=function to be set
# Both array accesses and functions are tagged as functions
# functions also has the class_ attribute set to 'function'
p[0].entry.set_kind(KIND.function, p.lineno(1))
p[0].entry.accessed = True | python | def p_bexpr_func(p):
""" bexpr : ID bexpr
"""
args = make_arg_list(make_argument(p[2], p.lineno(2)))
p[0] = make_call(p[1], p.lineno(1), args)
if p[0] is None:
return
if p[0].token in ('STRSLICE', 'VAR', 'STRING'):
entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1))
entry.accessed = True
return
# TODO: Check that arrays really needs kind=function to be set
# Both array accesses and functions are tagged as functions
# functions also has the class_ attribute set to 'function'
p[0].entry.set_kind(KIND.function, p.lineno(1))
p[0].entry.accessed = True | ['def', 'p_bexpr_func', '(', 'p', ')', ':', 'args', '=', 'make_arg_list', '(', 'make_argument', '(', 'p', '[', '2', ']', ',', 'p', '.', 'lineno', '(', '2', ')', ')', ')', 'p', '[', '0', ']', '=', 'make_call', '(', 'p', '[', '1', ']', ',', 'p', '.', 'lineno', '(', '1', ')', ',', 'args', ')', 'if', 'p', '[', '0', ']', 'is', 'None', ':', 'return', 'if', 'p', '[', '0', ']', '.', 'token', 'in', '(', "'STRSLICE'", ',', "'VAR'", ',', "'STRING'", ')', ':', 'entry', '=', 'SYMBOL_TABLE', '.', 'access_call', '(', 'p', '[', '1', ']', ',', 'p', '.', 'lineno', '(', '1', ')', ')', 'entry', '.', 'accessed', '=', 'True', 'return', '# TODO: Check that arrays really needs kind=function to be set', '# Both array accesses and functions are tagged as functions', "# functions also has the class_ attribute set to 'function'", 'p', '[', '0', ']', '.', 'entry', '.', 'set_kind', '(', 'KIND', '.', 'function', ',', 'p', '.', 'lineno', '(', '1', ')', ')', 'p', '[', '0', ']', '.', 'entry', '.', 'accessed', '=', 'True'] | bexpr : ID bexpr | ['bexpr', ':', 'ID', 'bexpr'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L2696-L2713 |