Dataset schema — id: int32 (0–252k) | repo: string (7–55 chars) | path: string (4–127 chars) | func_name: string (1–88 chars) | original_string: string (75–19.8k chars) | language: 1 class (python) | code: string (75–19.8k chars) | code_tokens: sequence | docstring: string (3–17.3k chars) | docstring_tokens: sequence | sha: string (40 chars) | url: string (87–242 chars)

Each row is listed as id | repo | path | func_name | language | sha | url, followed by its code.
5,300 | loli/medpy | medpy/features/intensity.py | mask_distance | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L246-L275

```python
def mask_distance(image, voxelspacing=None, mask=slice(None)):
    r"""
    Computes the distance of each point under the mask to the mask border, taking
    the voxel spacing into account.

    Note that this feature is independent of the actual image content and depends
    solely on the mask image. Therefore, a one-dimensional feature is always
    returned, even if a multi-spectral image has been supplied.

    If no mask has been supplied, the distances to the image borders are returned.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for the multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    mask_distance : ndarray
        Each voxel's distance to the mask borders.
    """
    if isinstance(image, (tuple, list)):  # multi-spectral case: the mask feature only needs one image
        image = image[0]
    return _extract_mask_distance(image, mask=mask, voxelspacing=voxelspacing)
```
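A minimal usage sketch for `mask_distance` (the array shapes and values here are illustrative, not from the source):

```python
# Hypothetical usage of mask_distance on a toy 2-D image with anisotropic voxels.
import numpy
from medpy.features.intensity import mask_distance

image = numpy.random.rand(5, 5)
mask = numpy.zeros((5, 5), dtype=bool)
mask[1:4, 1:4] = True                      # a 3x3 foreground block

# rows are 2 mm apart, columns 1 mm apart
features = mask_distance(image, voxelspacing=(2.0, 1.0), mask=mask)
print(features.shape)                      # (9,) -- one distance per voxel under the mask
```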
5,301 | loli/medpy | medpy/features/intensity.py | _extract_hemispheric_difference | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L522-L585

```python
def _extract_hemispheric_difference(image, mask=slice(None), sigma_active=7, sigma_reference=7, cut_plane=0, voxelspacing=None):
    """
    Internal, single-image version of `hemispheric_difference`.
    """
    # constants
    INTERPOLATION_RANGE = int(10)  # how many neighbouring values to take into account when interpolating the medial longitudinal fissure slice

    # check arguments
    if cut_plane >= image.ndim:
        raise ArgumentError('The supplied cut-plane ({}) is invalid, the image has only {} dimensions.'.format(cut_plane, image.ndim))

    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # compute the (presumed) location of the medial longitudinal fissure, treating also the
    # special case of an odd number of slices, in which case a cut into two equal halves is not possible
    medial_longitudinal_fissure = int(image.shape[cut_plane] / 2)
    medial_longitudinal_fissure_excluded = image.shape[cut_plane] % 2

    # split the head into a dexter and sinister half along the sagittal plane;
    # this is assumed to be consistent with a cut of the brain along the medial
    # longitudinal fissure, thus separating it into its hemispheres
    slicer = [slice(None)] * image.ndim
    slicer[cut_plane] = slice(None, medial_longitudinal_fissure)
    left_hemisphere = image[slicer]

    slicer[cut_plane] = slice(medial_longitudinal_fissure + medial_longitudinal_fissure_excluded, None)
    right_hemisphere = image[slicer]

    # flip the right hemisphere image along the cut plane
    slicer[cut_plane] = slice(None, None, -1)
    right_hemisphere = right_hemisphere[slicer]

    # subtract once left from right and once right from left hemisphere, including smoothing steps
    right_hemisphere_difference = _substract_hemispheres(right_hemisphere, left_hemisphere, sigma_active, sigma_reference, voxelspacing)
    left_hemisphere_difference = _substract_hemispheres(left_hemisphere, right_hemisphere, sigma_active, sigma_reference, voxelspacing)

    # re-flip the right hemisphere image to its original orientation
    right_hemisphere_difference = right_hemisphere_difference[slicer]

    # estimate the medial longitudinal fissure if required
    if 1 == medial_longitudinal_fissure_excluded:
        left_slicer = [slice(None)] * image.ndim
        right_slicer = [slice(None)] * image.ndim
        left_slicer[cut_plane] = slice(-1 * INTERPOLATION_RANGE, None)
        right_slicer[cut_plane] = slice(None, INTERPOLATION_RANGE)
        interp_data_left = left_hemisphere_difference[left_slicer]
        interp_data_right = right_hemisphere_difference[right_slicer]
        interp_indices_left = list(range(-1 * interp_data_left.shape[cut_plane], 0))
        interp_indices_right = list(range(1, interp_data_right.shape[cut_plane] + 1))
        interp_data = numpy.concatenate((left_hemisphere_difference[left_slicer], right_hemisphere_difference[right_slicer]), cut_plane)
        interp_indices = numpy.concatenate((interp_indices_left, interp_indices_right), 0)
        medial_longitudinal_fissure_estimated = interp1d(interp_indices, interp_data, kind='cubic', axis=cut_plane)(0)
        # add a singleton dimension
        slicer[cut_plane] = numpy.newaxis
        medial_longitudinal_fissure_estimated = medial_longitudinal_fissure_estimated[slicer]

    # stitch the images back together
    if 1 == medial_longitudinal_fissure_excluded:
        hemisphere_difference = numpy.concatenate((left_hemisphere_difference, medial_longitudinal_fissure_estimated, right_hemisphere_difference), cut_plane)
    else:
        hemisphere_difference = numpy.concatenate((left_hemisphere_difference, right_hemisphere_difference), cut_plane)

    # extract intensities and return
    return _extract_intensities(hemisphere_difference, mask)
```
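A minimal sketch of the split/flip slicing this function relies on, shown on a toy array (note that modern NumPy requires such list slicers to be converted to tuples before indexing):

```python
import numpy

image = numpy.arange(10).reshape(5, 2)     # five slices along cut_plane = 0
cut_plane = 0
mid = image.shape[cut_plane] // 2          # 2
odd = image.shape[cut_plane] % 2           # 1 -> the middle slice is excluded

slicer = [slice(None)] * image.ndim
slicer[cut_plane] = slice(None, mid)
left = image[tuple(slicer)]                # slices 0..1
slicer[cut_plane] = slice(mid + odd, None)
right = image[tuple(slicer)]               # slices 3..4
slicer[cut_plane] = slice(None, None, -1)
right_flipped = right[tuple(slicer)]       # mirrored along the cut plane
```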
5,302 | loli/medpy | medpy/features/intensity.py | _extract_local_histogram | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L587-L625

```python
def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0):
    """
    Internal, single-image version of @see local_histogram

    Note: Values outside of the histogram's range are not considered.
    Note: Mode "constant" is not available; instead, a mode "ignore" is provided.
    Note: The default dtype of the returned values is float.
    """
    if "constant" == mode:
        raise RuntimeError('boundary mode not supported')
    elif "ignore" == mode:
        mode = "constant"
    if 'image' == rang:
        rang = tuple(numpy.percentile(image[mask], cutoffp))
    elif not 2 == len(rang):
        raise RuntimeError('the rang must contain exactly two elements or the string "image"')

    _, bin_edges = numpy.histogram([], bins=bins, range=rang)
    # note: the original used the (since removed) numpy.float alias; the builtin float is equivalent
    output = _get_output(float if output is None else output, image, shape=[bins] + list(image.shape))

    # threshold the image into the histogram bins represented by the output image's first
    # dimension; treat the last bin separately, since its upper border is inclusive
    for i in range(bins - 1):
        output[i] = (image >= bin_edges[i]) & (image < bin_edges[i + 1])
    output[-1] = (image >= bin_edges[-2]) & (image <= bin_edges[-1])

    # apply the sum filter to each dimension, then normalize by dividing through the
    # sum of elements in the bins of each histogram
    for i in range(bins):
        output[i] = sum_filter(output[i], size=size, footprint=footprint, output=None, mode=mode, cval=0.0, origin=origin)
    divisor = numpy.sum(output, 0)
    divisor[0 == divisor] = 1
    output /= divisor

    # Notes on modes:
    # mode=constant with a cval outside the histogram range for the histogram equals mode=constant with cval=0 for the sum_filter
    # mode=constant with a cval inside the histogram range for the histogram has no equal for the sum_filter (and does not make much sense)
    # mode=X for the histogram equals mode=X for the sum_filter

    # treat as a multi-spectral image from which the intensities are to be extracted
    return _extract_feature(_extract_intensities, [h for h in output], mask)
```
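A self-contained sketch of the same bin-mask/neighbourhood-sum/normalise scheme, using `scipy.ndimage.uniform_filter` as a stand-in for medpy's `sum_filter` (a uniform filter is a sum filter up to a constant factor, which the normalisation step cancels):

```python
import numpy
from scipy.ndimage import uniform_filter

image = numpy.random.rand(32, 32)
bins, size = 4, 5
edges = numpy.histogram([], bins=bins, range=(0.0, 1.0))[1]

out = numpy.empty((bins,) + image.shape)
for i in range(bins):
    # the last bin's upper border is inclusive, as in the function above
    upper = image <= edges[i + 1] if i == bins - 1 else image < edges[i + 1]
    out[i] = uniform_filter(((image >= edges[i]) & upper).astype(float), size)

total = out.sum(0)
total[total == 0] = 1
out /= total  # each voxel now carries a normalised local histogram over its 5x5 neighbourhood
```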
5,303 | loli/medpy | medpy/features/intensity.py | _extract_median | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L627-L638

```python
def _extract_median(image, mask=slice(None), size=1, voxelspacing=None):
    """
    Internal, single-image version of `median`.
    """
    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # determine the structure element size in voxel units
    size = _create_structure_array(size, voxelspacing)

    return _extract_intensities(median_filter(image, size), mask)
```
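The median feature is just a median-filtered image sampled under the mask; a direct SciPy sketch (with the filter size given in voxels rather than the real-world units that `_create_structure_array` presumably converts from):

```python
import numpy
from scipy.ndimage import median_filter

image = numpy.random.rand(8, 8)
mask = image > 0.5
features = median_filter(image, size=3)[mask].ravel()  # one value per masked voxel
```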
5,304 | loli/medpy | medpy/features/intensity.py | _extract_gaussian_gradient_magnitude | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L640-L651

```python
def _extract_gaussian_gradient_magnitude(image, mask=slice(None), sigma=1, voxelspacing=None):
    """
    Internal, single-image version of `gaussian_gradient_magnitude`.
    """
    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # determine the gaussian kernel size in voxel units
    sigma = _create_structure_array(sigma, voxelspacing)

    return _extract_intensities(scipy_gaussian_gradient_magnitude(image, sigma), mask)
```
5,305 | loli/medpy | medpy/features/intensity.py | _extract_shifted_mean_gauss | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L653-L678

```python
def _extract_shifted_mean_gauss(image, mask=slice(None), offset=None, sigma=1, voxelspacing=None):
    """
    Internal, single-image version of `shifted_mean_gauss`.
    """
    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # set offset
    if offset is None:
        offset = [0] * image.ndim

    # determine the gaussian kernel size in voxel units
    sigma = _create_structure_array(sigma, voxelspacing)

    # compute the smoothed version of the image
    smoothed = gaussian_filter(image, sigma)

    shifted = numpy.zeros_like(smoothed)
    in_slicer = []
    out_slicer = []
    for o in offset:
        in_slicer.append(slice(o, None))
        # guard against o == 0: slice(None, 0) would be empty and leave the axis zeroed
        out_slicer.append(slice(None, -o if o else None))
    shifted[out_slicer] = smoothed[in_slicer]

    return _extract_intensities(shifted, mask)
```
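The shift itself, sketched on a 1-D signal (a stand-in for the smoothed image):

```python
import numpy

smoothed = numpy.arange(6, dtype=float)    # [0. 1. 2. 3. 4. 5.]
offset = [2]

shifted = numpy.zeros_like(smoothed)
in_slicer, out_slicer = [], []
for o in offset:
    in_slicer.append(slice(o, None))
    out_slicer.append(slice(None, -o if o else None))
shifted[tuple(out_slicer)] = smoothed[tuple(in_slicer)]
print(shifted)                             # [2. 3. 4. 5. 0. 0.]
```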
5,306 | loli/medpy | medpy/features/intensity.py | _extract_mask_distance | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L680-L689

```python
def _extract_mask_distance(image, mask=slice(None), voxelspacing=None):
    """
    Internal, single-image version of `mask_distance`.
    """
    if isinstance(mask, slice):
        # no mask supplied: use the whole image (numpy.bool was an alias of the builtin bool)
        mask = numpy.ones(image.shape, bool)

    distance_map = distance_transform_edt(mask, sampling=voxelspacing)

    return _extract_intensities(distance_map, mask)
```
5,307 | loli/medpy | medpy/features/intensity.py | _extract_local_mean_gauss | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L691-L702

```python
def _extract_local_mean_gauss(image, mask=slice(None), sigma=1, voxelspacing=None):
    """
    Internal, single-image version of `local_mean_gauss`.
    """
    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # determine the gaussian kernel size in voxel units
    sigma = _create_structure_array(sigma, voxelspacing)

    return _extract_intensities(gaussian_filter(image, sigma), mask)
```
5,308 | loli/medpy | medpy/features/intensity.py | _extract_centerdistance | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L705-L724

```python
def _extract_centerdistance(image, mask=slice(None), voxelspacing=None):
    """
    Internal, single-image version of `centerdistance`.
    """
    image = numpy.array(image, copy=False)

    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # get the image center and an array holding the image's indices
    centers = [(x - 1) / 2. for x in image.shape]
    indices = numpy.indices(image.shape, dtype=float)  # numpy.float was an alias of the builtin float

    # shift to the center of the image and correct the spacing to real-world coordinates
    for dim_indices, c, vs in zip(indices, centers, voxelspacing):
        dim_indices -= c
        dim_indices *= vs

    # compute the euclidean distance to the image center
    return numpy.sqrt(numpy.sum(numpy.square(indices), 0))[mask].ravel()
```
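A quick check of the computation on a 3x3 image with unit voxel spacing:

```python
import numpy

shape = (3, 3)
centers = [(x - 1) / 2. for x in shape]    # (1.0, 1.0) -- the middle voxel
idx = numpy.indices(shape, dtype=float)
for dim_indices, c in zip(idx, centers):
    dim_indices -= c                       # in-place: shifts the parent array's views
dist = numpy.sqrt(numpy.sum(numpy.square(idx), 0))
print(dist[0])                             # [1.4142 1. 1.4142] -- corners lie sqrt(2) away
```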
5,309 | loli/medpy | medpy/features/intensity.py | _extract_intensities | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L727-L731

```python
def _extract_intensities(image, mask=slice(None)):
    """
    Internal, single-image version of `intensities`.
    """
    return numpy.array(image, copy=True)[mask].ravel()
```
5,310 | loli/medpy | medpy/features/intensity.py | _substract_hemispheres | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L733-L744

```python
def _substract_hemispheres(active, reference, active_sigma, reference_sigma, voxel_spacing):
    """
    Helper function for `_extract_hemispheric_difference`.
    Smooths both images and then subtracts the reference from the active image.
    """
    active_kernel = _create_structure_array(active_sigma, voxel_spacing)
    active_smoothed = gaussian_filter(active, sigma=active_kernel)

    reference_kernel = _create_structure_array(reference_sigma, voxel_spacing)
    reference_smoothed = gaussian_filter(reference, sigma=reference_kernel)

    return active_smoothed - reference_smoothed
```
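The same subtraction sketched directly with SciPy; the conversion of sigma from real-world units to voxel units is an assumption about what `_create_structure_array` does (its name and the "in voxel units" comments elsewhere suggest a division by the voxel spacing):

```python
import numpy
from scipy.ndimage import gaussian_filter

active = numpy.random.rand(16, 16)
reference = numpy.random.rand(16, 16)

voxel_spacing = (2.0, 1.0)
sigma_vox = [7.0 / vs for vs in voxel_spacing]   # assumed real-world -> voxel conversion

difference = gaussian_filter(active, sigma=sigma_vox) - gaussian_filter(reference, sigma=sigma_vox)
```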
5,311 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._dispatch | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L80-L89

```python
def _dispatch(self, tree):
    "_dispatcher function, _dispatching tree type T to method _T."
    if isinstance(tree, list):
        for t in tree:
            self._dispatch(t)
        return
    meth = getattr(self, "_" + tree.__class__.__name__)
    if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
        return
    meth(tree)
```
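The `getattr`-based dispatch above is the classic visitor idiom; a self-contained miniature of the same pattern:

```python
class TinyVisitor:
    def visit(self, node):
        # route an int to _int, a str to _str, etc., by class name
        return getattr(self, "_" + node.__class__.__name__)(node)

    def _int(self, node):
        return "int({})".format(node)

    def _str(self, node):
        return "str({!r})".format(node)

print(TinyVisitor().visit(42))     # int(42)
print(TinyVisitor().visit("hi"))   # str('hi')
```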
5,312 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._AssAttr | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L110-L114

```python
def _AssAttr(self, t):
    """ Handle assigning an attribute of an object
    """
    self._dispatch(t.expr)
    self._write('.' + t.attrname)
```
5,313 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._Assign | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L116-L128

```python
def _Assign(self, t):
    """ Expression Assignment such as "a = 1".

    This only handles assignment in expressions. Keyword assignment
    is handled separately.
    """
    self._fill()
    for target in t.nodes:
        self._dispatch(target)
        self._write(" = ")
    self._dispatch(t.expr)
    if not self._do_indent:
        self._write('; ')
```
5,314 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._AssTuple | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L137-L148

```python
def _AssTuple(self, t):
    """ Tuple on left hand side of an expression.
    """
    # _write each element, separated by a comma
    for element in t.nodes[:-1]:
        self._dispatch(element)
        self._write(", ")

    # handle the last one without writing a comma
    last_element = t.nodes[-1]
    self._dispatch(last_element)
```
5,315 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._CallFunc | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L183-L203

```python
def _CallFunc(self, t):
    """ Function call.
    """
    self._dispatch(t.node)
    self._write("(")
    comma = False
    for e in t.args:
        if comma: self._write(", ")
        else: comma = True
        self._dispatch(e)
    if t.star_args:
        if comma: self._write(", ")
        else: comma = True
        self._write("*")
        self._dispatch(t.star_args)
    if t.dstar_args:
        if comma: self._write(", ")
        else: comma = True
        self._write("**")
        self._dispatch(t.dstar_args)
    self._write(")")
```
5,316 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._From | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L244-L256

```python
def _From(self, t):
    """ Handle "from xyz import foo, bar as baz".
    """
    # fixme: Are From and ImportFrom handled differently?
    self._fill("from ")
    self._write(t.modname)
    self._write(" import ")
    for i, (name, asname) in enumerate(t.names):
        if i != 0:
            self._write(", ")
        self._write(name)
        if asname is not None:
            self._write(" as " + asname)
```
5,317 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._Function | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L258-L279

```python
def _Function(self, t):
    """ Handle function definitions.
    """
    if t.decorators is not None:
        self._fill("@")
        self._dispatch(t.decorators)
    self._fill("def " + t.name + "(")
    # left-pad the defaults so they line up with the last argument names
    defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
    for i, arg in enumerate(zip(t.argnames, defaults)):
        self._write(arg[0])
        if arg[1] is not None:
            self._write('=')
            self._dispatch(arg[1])
        if i < len(t.argnames) - 1:
            self._write(', ')
    self._write(")")
    if self._single_func:
        self._do_indent = False
    self._enter()
    self._dispatch(t.code)
    self._leave()
    self._do_indent = True
```
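The left-padding trick used to align defaults with argument names, shown in isolation (defaults always belong to the last `len(defaults)` arguments):

```python
argnames = ["self", "a", "b", "c"]
defaults = [1, 2]                  # belong to b and c

aligned = [None] * (len(argnames) - len(defaults)) + list(defaults)
print(list(zip(argnames, aligned)))
# [('self', None), ('a', None), ('b', 1), ('c', 2)]
```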
5,318 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._Getattr | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L281-L291

```python
def _Getattr(self, t):
    """ Handle getting an attribute of an object
    """
    if isinstance(t.expr, (Div, Mul, Sub, Add)):
        self._write('(')
        self._dispatch(t.expr)
        self._write(')')
    else:
        self._dispatch(t.expr)
    self._write('.' + t.attrname)
```
5,319 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._Import | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L326-L336

```python
def _Import(self, t):
    """ Handle "import xyz.foo".
    """
    self._fill("import ")
    for i, (name, asname) in enumerate(t.names):
        if i != 0:
            self._write(", ")
        self._write(name)
        if asname is not None:
            self._write(" as " + asname)
```
5,320 | loli/medpy | doc/numpydoc/numpydoc/compiler_unparse.py | UnparseCompilerAst._Keyword | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L338-L343

```python
def _Keyword(self, t):
    """ Keyword value assignment within function calls and definitions.
    """
    self._write(t.name)
    self._write("=")
    self._dispatch(t.expr)
```
5,321 | loli/medpy | medpy/utilities/argparseu.py | __sequenceAscendingStrict | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/utilities/argparseu.py#L238-L244

```python
def __sequenceAscendingStrict(l):
    "Test a sequence's values to be in strictly ascending order."
    it = iter(l)
    next(it)
    if not all(b > a for a, b in zip(l, it)):
        raise argparse.ArgumentTypeError('All values must be given in strictly ascending order.')
    return l
```
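The offset-iterator trick pairs each element with its successor; the same check in isolation:

```python
def strictly_ascending(seq):
    it = iter(seq)
    next(it)                               # advance a second iterator by one
    return all(b > a for a, b in zip(seq, it))

print(strictly_ascending([1, 2, 5]))       # True
print(strictly_ascending([1, 1, 5]))       # False
```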
5,322 | loli/medpy | medpy/filter/IntensityRangeStandardization.py | IntensityRangeStandardization.__check_mapping | python | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/IntensityRangeStandardization.py#L459-L467

```python
def __check_mapping(self, landmarks):
    """
    Checks whether the image, from which the supplied landmarks were extracted, can
    be transformed to the learned standard intensity space without loss of
    information.
    """
    sc_udiff = numpy.asarray(self.__sc_umaxs)[1:] - numpy.asarray(self.__sc_umins)[:-1]
    l_diff = numpy.asarray(landmarks)[1:] - numpy.asarray(landmarks)[:-1]
    return numpy.all(sc_udiff > numpy.asarray(l_diff))
```
5,323 | loli/medpy | medpy/filter/IntensityRangeStandardization.py | IntensityRangeStandardization.is_in_interval | def is_in_interval(n, l, r, border = 'included'):
"""
Checks whether a number is inside the interval l, r.
"""
if 'included' == border:
return (n >= l) and (n <= r)
elif 'excluded' == border:
return (n > l) and (n < r)
else:
raise ValueError('borders must be either \'included\' or \'excluded\'') | python | def is_in_interval(n, l, r, border = 'included'):
"""
Checks whether a number is inside the interval l, r.
"""
if 'included' == border:
return (n >= l) and (n <= r)
elif 'excluded' == border:
return (n > l) and (n < r)
else:
raise ValueError('borders must be either \'included\' or \'excluded\'') | [
"def",
"is_in_interval",
"(",
"n",
",",
"l",
",",
"r",
",",
"border",
"=",
"'included'",
")",
":",
"if",
"'included'",
"==",
"border",
":",
"return",
"(",
"n",
">=",
"l",
")",
"and",
"(",
"n",
"<=",
"r",
")",
"elif",
"'excluded'",
"==",
"border",
":",
"return",
"(",
"n",
">",
"l",
")",
"and",
"(",
"n",
"<",
"r",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'borders must be either \\'included\\' or \\'excluded\\''",
")"
] | Checks whether a number is inside the interval l, r. | [
"Checks",
"whether",
"a",
"number",
"is",
"inside",
"the",
"interval",
"l",
"r",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/IntensityRangeStandardization.py#L497-L506 |
5,324 | loli/medpy | medpy/filter/IntensityRangeStandardization.py | IntensityRangeStandardization.are_in_interval | def are_in_interval(s, l, r, border = 'included'):
"""
Checks whether all numbers in the sequence s lie inside the interval formed by
l and r.
"""
return numpy.all([IntensityRangeStandardization.is_in_interval(x, l, r, border) for x in s]) | python | def are_in_interval(s, l, r, border = 'included'):
"""
Checks whether all numbers in the sequence s lie inside the interval formed by
l and r.
"""
return numpy.all([IntensityRangeStandardization.is_in_interval(x, l, r, border) for x in s]) | [
"def",
"are_in_interval",
"(",
"s",
",",
"l",
",",
"r",
",",
"border",
"=",
"'included'",
")",
":",
"return",
"numpy",
".",
"all",
"(",
"[",
"IntensityRangeStandardization",
".",
"is_in_interval",
"(",
"x",
",",
"l",
",",
"r",
",",
"border",
")",
"for",
"x",
"in",
"s",
"]",
")"
] | Checks whether all numbers in the sequence s lie inside the interval formed by
l and r. | [
"Checks",
"whether",
"all",
"number",
"in",
"the",
"sequence",
"s",
"lie",
"inside",
"the",
"interval",
"formed",
"by",
"l",
"and",
"r",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/IntensityRangeStandardization.py#L509-L514 |
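Usage sketch for the two interval helpers above, assuming they are reachable as static methods on the class suggested by the repository path:

```python
from medpy.filter import IntensityRangeStandardization as IRS  # assumed import path

print(IRS.is_in_interval(5, 5, 10))                      # True: borders included by default
print(IRS.is_in_interval(5, 5, 10, border='excluded'))   # False
print(IRS.are_in_interval([6, 7, 8], 5, 10))             # True only if every value passes
```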
5,325 | loli/medpy | medpy/filter/houghtransform.py | template_sphere | def template_sphere (radius, dimensions):
r"""
Returns a spherical binary structure of the supplied radius that can be used as
template input to the generalized hough transform.
Parameters
----------
radius : integer
The sphere's radius in voxels.
dimensions : integer
The dimensionality of the sphere.
Returns
-------
template_sphere : ndarray
A boolean array containing a sphere.
"""
if int(dimensions) != dimensions:
raise TypeError('The supplied dimension parameter must be of type integer.')
dimensions = int(dimensions)
return template_ellipsoid(dimensions * [radius * 2]) | python | def template_sphere (radius, dimensions):
r"""
Returns a spherical binary structure of the supplied radius that can be used as
template input to the generalized hough transform.
Parameters
----------
radius : integer
The sphere's radius in voxels.
dimensions : integer
The dimensionality of the sphere.
Returns
-------
template_sphere : ndarray
A boolean array containing a sphere.
"""
if int(dimensions) != dimensions:
raise TypeError('The supplied dimension parameter must be of type integer.')
dimensions = int(dimensions)
return template_ellipsoid(dimensions * [radius * 2]) | [
"def",
"template_sphere",
"(",
"radius",
",",
"dimensions",
")",
":",
"if",
"int",
"(",
"dimensions",
")",
"!=",
"dimensions",
":",
"raise",
"TypeError",
"(",
"'The supplied dimension parameter must be of type integer.'",
")",
"dimensions",
"=",
"int",
"(",
"dimensions",
")",
"return",
"template_ellipsoid",
"(",
"dimensions",
"*",
"[",
"radius",
"*",
"2",
"]",
")"
] | r"""
Returns a spherical binary structure of the supplied radius that can be used as
template input to the generalized hough transform.
Parameters
----------
radius : integer
The sphere's radius in voxels.
dimensions : integer
The dimensionality of the sphere.
Returns
-------
template_sphere : ndarray
A boolean array containing a sphere. | [
"r",
"Returns",
"a",
"spherical",
"binary",
"structure",
"of",
"a",
"of",
"the",
"supplied",
"radius",
"that",
"can",
"be",
"used",
"as",
"template",
"input",
"to",
"the",
"generalized",
"hough",
"transform",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/houghtransform.py#L156-L177 |
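A quick usage sketch (assumes medpy is installed); the sphere delegates to template_ellipsoid with equal side lengths of radius * 2:

```python
from medpy.filter.houghtransform import template_sphere  # assumed import path

sphere = template_sphere(3, 3)   # radius 3, three dimensions
print(sphere.shape)              # (6, 6, 6): each side is radius * 2 voxels
print(sphere.dtype)              # a boolean structure, per the docstring
```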
5,326 | loli/medpy | doc/numpydoc/numpydoc/traitsdoc.py | looks_like_issubclass | def looks_like_issubclass(obj, classname):
""" Return True if the object has a class or superclass with the given class
name.
Ignores old-style classes.
"""
t = obj
if t.__name__ == classname:
return True
for klass in t.__mro__:
if klass.__name__ == classname:
return True
return False | python | def looks_like_issubclass(obj, classname):
""" Return True if the object has a class or superclass with the given class
name.
Ignores old-style classes.
"""
t = obj
if t.__name__ == classname:
return True
for klass in t.__mro__:
if klass.__name__ == classname:
return True
return False | [
"def",
"looks_like_issubclass",
"(",
"obj",
",",
"classname",
")",
":",
"t",
"=",
"obj",
"if",
"t",
".",
"__name__",
"==",
"classname",
":",
"return",
"True",
"for",
"klass",
"in",
"t",
".",
"__mro__",
":",
"if",
"klass",
".",
"__name__",
"==",
"classname",
":",
"return",
"True",
"return",
"False"
] | Return True if the object has a class or superclass with the given class
name.
Ignores old-style classes. | [
"Return",
"True",
"if",
"the",
"object",
"has",
"a",
"class",
"or",
"superclass",
"with",
"the",
"given",
"class",
"name",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/traitsdoc.py#L102-L114 |
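The MRO walk above needs no Traits machinery; a self-contained equivalent:

```python
class Base: pass
class Child(Base): pass

def looks_like_issubclass(obj, classname):
    # Match the class itself or any ancestor in its method resolution order.
    return obj.__name__ == classname or any(
        klass.__name__ == classname for klass in obj.__mro__)

assert looks_like_issubclass(Child, 'Base')
assert not looks_like_issubclass(Child, 'Unrelated')
```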
5,327 | loli/medpy | medpy/filter/utilities.py | __make_footprint | def __make_footprint(input, size, footprint):
"Creates a standard footprint element ala scipy.ndimage."
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
return footprint | python | def __make_footprint(input, size, footprint):
"Creates a standard footprint element ala scipy.ndimage."
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
return footprint | [
"def",
"__make_footprint",
"(",
"input",
",",
"size",
",",
"footprint",
")",
":",
"if",
"footprint",
"is",
"None",
":",
"if",
"size",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"no footprint or filter size provided\"",
")",
"sizes",
"=",
"_ni_support",
".",
"_normalize_sequence",
"(",
"size",
",",
"input",
".",
"ndim",
")",
"footprint",
"=",
"numpy",
".",
"ones",
"(",
"sizes",
",",
"dtype",
"=",
"bool",
")",
"else",
":",
"footprint",
"=",
"numpy",
".",
"asarray",
"(",
"footprint",
",",
"dtype",
"=",
"bool",
")",
"return",
"footprint"
] | Creates a standard footprint element à la scipy.ndimage. | [
"Creates",
"a",
"standard",
"footprint",
"element",
"ala",
"scipy",
".",
"ndimage",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/utilities.py#L246-L255 |
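The size path expands a scalar (or per-axis sequence) into a boolean box; a sketch of the same expansion using only public numpy, since _normalize_sequence is a private scipy helper:

```python
import numpy

def make_footprint(ndim, size=None, footprint=None):
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = [size] * ndim if numpy.isscalar(size) else list(size)
        return numpy.ones(sizes, dtype=bool)
    return numpy.asarray(footprint, dtype=bool)

print(make_footprint(2, size=3).shape)        # (3, 3)
print(make_footprint(2, size=(3, 5)).shape)   # (3, 5)
```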
5,328 | loli/medpy | medpy/graphcut/energy_label.py | __check_label_image | def __check_label_image(label_image):
"""Check the label image for consistent labelling starting from 1."""
encountered_indices = scipy.unique(label_image)
expected_indices = scipy.arange(1, label_image.max() + 1)
if not encountered_indices.size == expected_indices.size or \
not (encountered_indices == expected_indices).all():
raise AttributeError('The supplied label image does either not contain any regions or they are not labeled consecutively starting from 1.') | python | def __check_label_image(label_image):
"""Check the label image for consistent labelling starting from 1."""
encountered_indices = scipy.unique(label_image)
expected_indices = scipy.arange(1, label_image.max() + 1)
if not encountered_indices.size == expected_indices.size or \
not (encountered_indices == expected_indices).all():
raise AttributeError('The supplied label image does either not contain any regions or they are not labeled consecutively starting from 1.') | [
"def",
"__check_label_image",
"(",
"label_image",
")",
":",
"encountered_indices",
"=",
"scipy",
".",
"unique",
"(",
"label_image",
")",
"expected_indices",
"=",
"scipy",
".",
"arange",
"(",
"1",
",",
"label_image",
".",
"max",
"(",
")",
"+",
"1",
")",
"if",
"not",
"encountered_indices",
".",
"size",
"==",
"expected_indices",
".",
"size",
"or",
"not",
"(",
"encountered_indices",
"==",
"expected_indices",
")",
".",
"all",
"(",
")",
":",
"raise",
"AttributeError",
"(",
"'The supplied label image does either not contain any regions or they are not labeled consecutively starting from 1.'",
")"
] | Check the label image for consistent labelling starting from 1. | [
"Check",
"the",
"label",
"image",
"for",
"consistent",
"labelling",
"starting",
"from",
"1",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/energy_label.py#L407-L413 |
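The consistency test compares the labels actually present against arange(1, max + 1); numpy exposes the same functions scipy re-exports here:

```python
import numpy

good = numpy.array([[1, 1, 2], [3, 3, 2]])   # labels 1..3, consecutive
bad  = numpy.array([[1, 1, 4], [3, 3, 4]])   # label 2 is missing

for img in (good, bad):
    found = numpy.unique(img)
    expected = numpy.arange(1, img.max() + 1)
    print(numpy.array_equal(found, expected))  # True, then False (would raise above)
```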
5,329 | loli/medpy | bin/medpy_graphcut_label_bgreduced.py | __xd_iterator_pass_on | def __xd_iterator_pass_on(arr, view, fun):
"""
Like xd_iterator, but the return value of each fun call is passed on to the next call and only the last one is returned.
"""
# create list of iterations
iterations = [[None] if dim in view else list(range(arr.shape[dim])) for dim in range(arr.ndim)]
# iterate, create slicer, execute function and collect results
passon = None
for indices in itertools.product(*iterations):
slicer = [slice(None) if idx is None else slice(idx, idx + 1) for idx in indices]
passon = fun(scipy.squeeze(arr[slicer]), passon)
return passon | python | def __xd_iterator_pass_on(arr, view, fun):
"""
Like xd_iterator, but the return value of each fun call is passed on to the next call and only the last one is returned.
"""
# create list of iterations
iterations = [[None] if dim in view else list(range(arr.shape[dim])) for dim in range(arr.ndim)]
# iterate, create slicer, execute function and collect results
passon = None
for indices in itertools.product(*iterations):
slicer = [slice(None) if idx is None else slice(idx, idx + 1) for idx in indices]
passon = fun(scipy.squeeze(arr[slicer]), passon)
return passon | [
"def",
"__xd_iterator_pass_on",
"(",
"arr",
",",
"view",
",",
"fun",
")",
":",
"# create list of iterations",
"iterations",
"=",
"[",
"[",
"None",
"]",
"if",
"dim",
"in",
"view",
"else",
"list",
"(",
"range",
"(",
"arr",
".",
"shape",
"[",
"dim",
"]",
")",
")",
"for",
"dim",
"in",
"range",
"(",
"arr",
".",
"ndim",
")",
"]",
"# iterate, create slicer, execute function and collect results",
"passon",
"=",
"None",
"for",
"indices",
"in",
"itertools",
".",
"product",
"(",
"*",
"iterations",
")",
":",
"slicer",
"=",
"[",
"slice",
"(",
"None",
")",
"if",
"idx",
"is",
"None",
"else",
"slice",
"(",
"idx",
",",
"idx",
"+",
"1",
")",
"for",
"idx",
"in",
"indices",
"]",
"passon",
"=",
"fun",
"(",
"scipy",
".",
"squeeze",
"(",
"arr",
"[",
"slicer",
"]",
")",
",",
"passon",
")",
"return",
"passon"
] | Like xd_iterator, but the return value of each fun call is passed on to the next call and only the last one is returned. | [
"Like",
"xd_iterator",
"but",
"the",
"fun",
"return",
"values",
"are",
"always",
"passed",
"on",
"to",
"the",
"next",
"and",
"only",
"the",
"last",
"returned",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/bin/medpy_graphcut_label_bgreduced.py#L176-L189 |
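The pass-on pattern is a fold over sub-array slices; an illustrative reduction over the first axis of a 3D array (plain numpy, hypothetical callback name):

```python
import numpy

arr = numpy.arange(24).reshape(2, 3, 4)

def accumulate(slice2d, passon):
    # Each call receives the previous return value and extends it.
    return slice2d.sum() + (passon or 0)

passon = None
for idx in range(arr.shape[0]):            # view = (1, 2): iterate axis 0 only
    passon = accumulate(arr[idx], passon)
print(passon == arr.sum())                 # True: only the final value survives
```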
5,330 | loli/medpy | medpy/io/header.py | set_pixel_spacing | def set_pixel_spacing(hdr, spacing):
r"""Depreciated synonym of `~medpy.io.header.set_voxel_spacing`."""
warnings.warn('get_pixel_spacing() is depreciated, use set_voxel_spacing() instead', category=DeprecationWarning)
set_voxel_spacing(hdr, spacing) | python | def set_pixel_spacing(hdr, spacing):
r"""Depreciated synonym of `~medpy.io.header.set_voxel_spacing`."""
warnings.warn('get_pixel_spacing() is depreciated, use set_voxel_spacing() instead', category=DeprecationWarning)
set_voxel_spacing(hdr, spacing) | [
"def",
"set_pixel_spacing",
"(",
"hdr",
",",
"spacing",
")",
":",
"warnings",
".",
"warn",
"(",
"'get_pixel_spacing() is depreciated, use set_voxel_spacing() instead'",
",",
"category",
"=",
"DeprecationWarning",
")",
"set_voxel_spacing",
"(",
"hdr",
",",
"spacing",
")"
] | r"""Depreciated synonym of `~medpy.io.header.set_voxel_spacing`. | [
"r",
"Depreciated",
"synonym",
"of",
"~medpy",
".",
"io",
".",
"header",
".",
"set_voxel_spacing",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/io/header.py#L100-L103 |
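The deprecated-alias pattern above is generic; a minimal standalone sketch (hypothetical names). Note that stacklevel=2 points the warning at the caller:

```python
import warnings

def new_name(x):
    return x * 2

def old_name(x):
    # Deprecated alias: warn, then delegate to the replacement.
    warnings.warn('old_name() is deprecated, use new_name() instead',
                  category=DeprecationWarning, stacklevel=2)
    return new_name(x)

print(old_name(21))  # 42, plus a DeprecationWarning when warnings are enabled
```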
5,331 | loli/medpy | medpy/io/header.py | Header.copy_to | def copy_to(self, sitkimage):
"""
Copy all stored meta information to an sitk Image.
Note that only the spacing and the offset/origin information
are guaranteed to be preserved, although the method also
tries to copy other meta information such as DICOM tags.
Parameters
----------
sitkimage : sitk.Image
the sitk Image object to which to copy the information
Returns
-------
sitkimage : sitk.Image
the passed sitk Image object
"""
if self.sitkimage is not None:
for k in self.sitkimage.GetMetaDataKeys():
sitkimage.SetMetaData(k, self.sitkimage.GetMetaData(k))
ndim = len(sitkimage.GetSize())
spacing, offset, direction = self.get_info_consistent(ndim)
sitkimage.SetSpacing(spacing)
sitkimage.SetOrigin(offset)
sitkimage.SetDirection(tuple(direction.flatten()))
return sitkimage | python | def copy_to(self, sitkimage):
"""
Copy all stored meta information to an sitk Image.
Note that only the spacing and the offset/origin information
are guaranteed to be preserved, although the method also
tries to copy other meta information such as DICOM tags.
Parameters
----------
sitkimage : sitk.Image
the sitk Image object to which to copy the information
Returns
-------
sitkimage : sitk.Image
the passed sitk Image object
"""
if self.sitkimage is not None:
for k in self.sitkimage.GetMetaDataKeys():
sitkimage.SetMetaData(k, self.sitkimage.GetMetaData(k))
ndim = len(sitkimage.GetSize())
spacing, offset, direction = self.get_info_consistent(ndim)
sitkimage.SetSpacing(spacing)
sitkimage.SetOrigin(offset)
sitkimage.SetDirection(tuple(direction.flatten()))
return sitkimage | [
"def",
"copy_to",
"(",
"self",
",",
"sitkimage",
")",
":",
"if",
"self",
".",
"sitkimage",
"is",
"not",
"None",
":",
"for",
"k",
"in",
"self",
".",
"sitkimage",
".",
"GetMetaDataKeys",
"(",
")",
":",
"sitkimage",
".",
"SetMetaData",
"(",
"k",
",",
"self",
".",
"sitkimage",
".",
"GetMetaData",
"(",
"k",
")",
")",
"ndim",
"=",
"len",
"(",
"sitkimage",
".",
"GetSize",
"(",
")",
")",
"spacing",
",",
"offset",
",",
"direction",
"=",
"self",
".",
"get_info_consistent",
"(",
"ndim",
")",
"sitkimage",
".",
"SetSpacing",
"(",
"spacing",
")",
"sitkimage",
".",
"SetOrigin",
"(",
"offset",
")",
"sitkimage",
".",
"SetDirection",
"(",
"tuple",
"(",
"direction",
".",
"flatten",
"(",
")",
")",
")",
"return",
"sitkimage"
] | Copy all stored meta information to an sitk Image.
Note that only the spacing and the offset/origin information
are guaranteed to be preserved, although the method also
tries to copy other meta information such as DICOM tags.
Parameters
----------
sitkimage : sitk.Image
the sitk Image object to which to copy the information
Returns
-------
sitkimage : sitk.Image
the passed sitk Image object | [
"Copy",
"all",
"stored",
"meta",
"information",
"info",
"to",
"an",
"sitk",
"Image",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/io/header.py#L213-L242 |
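What copy_to() guarantees can be reproduced with SimpleITK's public setters (assumes SimpleITK is installed):

```python
import SimpleITK as sitk

src = sitk.Image(8, 8, sitk.sitkUInt8)
src.SetSpacing((1.5, 2.0))
src.SetOrigin((10.0, -5.0))

dst = sitk.Image(8, 8, sitk.sitkUInt8)
dst.SetSpacing(src.GetSpacing())          # the guaranteed part of copy_to()
dst.SetOrigin(src.GetOrigin())
dst.SetDirection(src.GetDirection())
print(dst.GetSpacing(), dst.GetOrigin())  # (1.5, 2.0) (10.0, -5.0)
```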
5,332 | loli/medpy | medpy/io/header.py | Header.get_info_consistent | def get_info_consistent(self, ndim):
"""
Returns the main meta-data information adapted to the supplied
image dimensionality.
It will try to resolve inconsistencies and other conflicts,
altering the information available in the most plausible way.
Parameters
----------
ndim : int
image's dimensionality
Returns
-------
spacing : tuple of floats
offset : tuple of floats
direction : ndarray
"""
if ndim > len(self.spacing):
spacing = self.spacing + (1.0, ) * (ndim - len(self.spacing))
else:
spacing = self.spacing[:ndim]
if ndim > len(self.offset):
offset = self.offset + (0.0, ) * (ndim - len(self.offset))
else:
offset = self.offset[:ndim]
if ndim > self.direction.shape[0]:
direction = np.identity(ndim)
direction[:self.direction.shape[0], :self.direction.shape[0]] = self.direction
else:
direction = self.direction[:ndim, :ndim]
return spacing, offset, direction | python | def get_info_consistent(self, ndim):
"""
Returns the main meta-data information adapted to the supplied
image dimensionality.
It will try to resolve inconsistencies and other conflicts,
altering the information available in the most plausible way.
Parameters
----------
ndim : int
image's dimensionality
Returns
-------
spacing : tuple of floats
offset : tuple of floats
direction : ndarray
"""
if ndim > len(self.spacing):
spacing = self.spacing + (1.0, ) * (ndim - len(self.spacing))
else:
spacing = self.spacing[:ndim]
if ndim > len(self.offset):
offset = self.offset + (0.0, ) * (ndim - len(self.offset))
else:
offset = self.offset[:ndim]
if ndim > self.direction.shape[0]:
direction = np.identity(ndim)
direction[:self.direction.shape[0], :self.direction.shape[0]] = self.direction
else:
direction = self.direction[:ndim, :ndim]
return spacing, offset, direction | [
"def",
"get_info_consistent",
"(",
"self",
",",
"ndim",
")",
":",
"if",
"ndim",
">",
"len",
"(",
"self",
".",
"spacing",
")",
":",
"spacing",
"=",
"self",
".",
"spacing",
"+",
"(",
"1.0",
",",
")",
"*",
"(",
"ndim",
"-",
"len",
"(",
"self",
".",
"spacing",
")",
")",
"else",
":",
"spacing",
"=",
"self",
".",
"spacing",
"[",
":",
"ndim",
"]",
"if",
"ndim",
">",
"len",
"(",
"self",
".",
"offset",
")",
":",
"offset",
"=",
"self",
".",
"offset",
"+",
"(",
"0.0",
",",
")",
"*",
"(",
"ndim",
"-",
"len",
"(",
"self",
".",
"offset",
")",
")",
"else",
":",
"offset",
"=",
"self",
".",
"offset",
"[",
":",
"ndim",
"]",
"if",
"ndim",
">",
"self",
".",
"direction",
".",
"shape",
"[",
"0",
"]",
":",
"direction",
"=",
"np",
".",
"identity",
"(",
"ndim",
")",
"direction",
"[",
":",
"self",
".",
"direction",
".",
"shape",
"[",
"0",
"]",
",",
":",
"self",
".",
"direction",
".",
"shape",
"[",
"0",
"]",
"]",
"=",
"self",
".",
"direction",
"else",
":",
"direction",
"=",
"self",
".",
"direction",
"[",
":",
"ndim",
",",
":",
"ndim",
"]",
"return",
"spacing",
",",
"offset",
",",
"direction"
] | Returns the main meta-data information adapted to the supplied
image dimensionality.
It will try to resolve inconsistencies and other conflicts,
altering the information available in the most plausible way.
Parameters
----------
ndim : int
image's dimensionality
Returns
-------
spacing : tuple of floats
offset : tuple of floats
direction : ndarray | [
"Returns",
"the",
"main",
"meta",
"-",
"data",
"information",
"adapted",
"to",
"the",
"supplied",
"image",
"dimensionality",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/io/header.py#L244-L279 |
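A standalone trace of the padding rules for a 2D header asked to describe a 3D image:

```python
import numpy as np

spacing, offset, direction = (1.5, 2.0), (10.0, -5.0), np.identity(2)
ndim = 3

spacing = spacing + (1.0,) * (ndim - len(spacing))   # -> (1.5, 2.0, 1.0)
offset  = offset  + (0.0,) * (ndim - len(offset))    # -> (10.0, -5.0, 0.0)
padded = np.identity(ndim)
padded[:direction.shape[0], :direction.shape[0]] = direction
print(spacing, offset)
print(padded)   # original 2x2 block embedded in a 3x3 identity
```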
5,333 | loli/medpy | medpy/metric/binary.py | hd95 | def hd95(result, reference, voxelspacing=None, connectivity=1):
"""
95th percentile of the Hausdorff Distance.
Computes the 95th percentile of the (symmetric) Hausdorff Distance (HD) between the binary objects in two
images. Compared to the Hausdorff Distance, this metric is slightly more stable to small outliers and is
commonly used in Biomedical Segmentation challenges.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
Note that the connectivity influences the result in the case of the Hausdorff distance.
Returns
-------
hd : float
The symmetric Hausdorff Distance between the object(s) in ```result``` and the
object(s) in ```reference```. The distance unit is the same as for the spacing of
elements along each dimension, which is usually given in mm.
See also
--------
:func:`hd`
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
"""
hd1 = __surface_distances(result, reference, voxelspacing, connectivity)
hd2 = __surface_distances(reference, result, voxelspacing, connectivity)
hd95 = numpy.percentile(numpy.hstack((hd1, hd2)), 95)
return hd95 | python | def hd95(result, reference, voxelspacing=None, connectivity=1):
"""
95th percentile of the Hausdorff Distance.
Computes the 95th percentile of the (symmetric) Hausdorff Distance (HD) between the binary objects in two
images. Compared to the Hausdorff Distance, this metric is slightly more stable to small outliers and is
commonly used in Biomedical Segmentation challenges.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
Note that the connectivity influences the result in the case of the Hausdorff distance.
Returns
-------
hd : float
The symmetric Hausdorff Distance between the object(s) in ```result``` and the
object(s) in ```reference```. The distance unit is the same as for the spacing of
elements along each dimension, which is usually given in mm.
See also
--------
:func:`hd`
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
"""
hd1 = __surface_distances(result, reference, voxelspacing, connectivity)
hd2 = __surface_distances(reference, result, voxelspacing, connectivity)
hd95 = numpy.percentile(numpy.hstack((hd1, hd2)), 95)
return hd95 | [
"def",
"hd95",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"hd1",
"=",
"__surface_distances",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
",",
"connectivity",
")",
"hd2",
"=",
"__surface_distances",
"(",
"reference",
",",
"result",
",",
"voxelspacing",
",",
"connectivity",
")",
"hd95",
"=",
"numpy",
".",
"percentile",
"(",
"numpy",
".",
"hstack",
"(",
"(",
"hd1",
",",
"hd2",
")",
")",
",",
"95",
")",
"return",
"hd95"
] | 95th percentile of the Hausdorff Distance.
Computes the 95th percentile of the (symmetric) Hausdorff Distance (HD) between the binary objects in two
images. Compared to the Hausdorff Distance, this metric is slightly more stable to small outliers and is
commonly used in Biomedical Segmentation challenges.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
Note that the connectivity influences the result in the case of the Hausdorff distance.
Returns
-------
hd : float
The symmetric Hausdorff Distance between the object(s) in ```result``` and the
object(s) in ```reference```. The distance unit is the same as for the spacing of
elements along each dimension, which is usually given in mm.
See also
--------
:func:`hd`
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order. | [
"95th",
"percentile",
"of",
"the",
"Hausdorff",
"Distance",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/binary.py#L354-L399 |
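Usage sketch with two overlapping squares (assumes medpy, with a compatible numpy, is installed):

```python
import numpy
from medpy.metric.binary import hd95   # import path follows the repository layout

a = numpy.zeros((32, 32), dtype=bool); a[8:16, 8:16] = True
b = numpy.zeros((32, 32), dtype=bool); b[9:17, 9:17] = True

print(hd95(a, b))                        # in voxel units
print(hd95(a, b, voxelspacing=(1, 2)))   # anisotropic spacing, e.g. mm per voxel
```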
5,334 | loli/medpy | medpy/metric/binary.py | __surface_distances | def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
"""
The distances between the surface voxels of binary objects in result and their
nearest partner surface voxel of a binary object in reference.
"""
result = numpy.atleast_1d(result.astype(numpy.bool))
reference = numpy.atleast_1d(reference.astype(numpy.bool))
if voxelspacing is not None:
voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)
voxelspacing = numpy.asarray(voxelspacing, dtype=numpy.float64)
if not voxelspacing.flags.contiguous:
voxelspacing = voxelspacing.copy()
# binary structure
footprint = generate_binary_structure(result.ndim, connectivity)
# test for emptiness
if 0 == numpy.count_nonzero(result):
raise RuntimeError('The first supplied array does not contain any binary object.')
if 0 == numpy.count_nonzero(reference):
raise RuntimeError('The second supplied array does not contain any binary object.')
# extract only 1-pixel border line of objects
result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)
reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)
# compute average surface distance
# Note: scipys distance transform is calculated only inside the borders of the
# foreground objects, therefore the input has to be reversed
dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
sds = dt[result_border]
return sds | python | def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
"""
The distances between the surface voxels of binary objects in result and their
nearest partner surface voxel of a binary object in reference.
"""
result = numpy.atleast_1d(result.astype(numpy.bool))
reference = numpy.atleast_1d(reference.astype(numpy.bool))
if voxelspacing is not None:
voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)
voxelspacing = numpy.asarray(voxelspacing, dtype=numpy.float64)
if not voxelspacing.flags.contiguous:
voxelspacing = voxelspacing.copy()
# binary structure
footprint = generate_binary_structure(result.ndim, connectivity)
# test for emptiness
if 0 == numpy.count_nonzero(result):
raise RuntimeError('The first supplied array does not contain any binary object.')
if 0 == numpy.count_nonzero(reference):
raise RuntimeError('The second supplied array does not contain any binary object.')
# extract only 1-pixel border line of objects
result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)
reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)
# compute average surface distance
# Note: scipys distance transform is calculated only inside the borders of the
# foreground objects, therefore the input has to be reversed
dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
sds = dt[result_border]
return sds | [
"def",
"__surface_distances",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"result",
"=",
"numpy",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"numpy",
".",
"bool",
")",
")",
"reference",
"=",
"numpy",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"numpy",
".",
"bool",
")",
")",
"if",
"voxelspacing",
"is",
"not",
"None",
":",
"voxelspacing",
"=",
"_ni_support",
".",
"_normalize_sequence",
"(",
"voxelspacing",
",",
"result",
".",
"ndim",
")",
"voxelspacing",
"=",
"numpy",
".",
"asarray",
"(",
"voxelspacing",
",",
"dtype",
"=",
"numpy",
".",
"float64",
")",
"if",
"not",
"voxelspacing",
".",
"flags",
".",
"contiguous",
":",
"voxelspacing",
"=",
"voxelspacing",
".",
"copy",
"(",
")",
"# binary structure",
"footprint",
"=",
"generate_binary_structure",
"(",
"result",
".",
"ndim",
",",
"connectivity",
")",
"# test for emptiness",
"if",
"0",
"==",
"numpy",
".",
"count_nonzero",
"(",
"result",
")",
":",
"raise",
"RuntimeError",
"(",
"'The first supplied array does not contain any binary object.'",
")",
"if",
"0",
"==",
"numpy",
".",
"count_nonzero",
"(",
"reference",
")",
":",
"raise",
"RuntimeError",
"(",
"'The second supplied array does not contain any binary object.'",
")",
"# extract only 1-pixel border line of objects",
"result_border",
"=",
"result",
"^",
"binary_erosion",
"(",
"result",
",",
"structure",
"=",
"footprint",
",",
"iterations",
"=",
"1",
")",
"reference_border",
"=",
"reference",
"^",
"binary_erosion",
"(",
"reference",
",",
"structure",
"=",
"footprint",
",",
"iterations",
"=",
"1",
")",
"# compute average surface distance ",
"# Note: scipys distance transform is calculated only inside the borders of the",
"# foreground objects, therefore the input has to be reversed",
"dt",
"=",
"distance_transform_edt",
"(",
"~",
"reference_border",
",",
"sampling",
"=",
"voxelspacing",
")",
"sds",
"=",
"dt",
"[",
"result_border",
"]",
"return",
"sds"
] | The distances between the surface voxels of binary objects in result and their
nearest partner surface voxel of a binary object in reference. | [
"The",
"distances",
"between",
"the",
"surface",
"voxel",
"of",
"binary",
"objects",
"in",
"result",
"and",
"their",
"nearest",
"partner",
"surface",
"voxel",
"of",
"a",
"binary",
"object",
"in",
"reference",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/binary.py#L1195-L1227 |
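The same pipeline with only public scipy.ndimage calls: erode, XOR to get one-voxel borders, then an EDT on the inverted reference border. (Recent numpy removed the numpy.bool alias used above; plain bool is the modern spelling.)

```python
import numpy
from scipy.ndimage import binary_erosion, distance_transform_edt, generate_binary_structure

result = numpy.zeros((16, 16), dtype=bool);    result[4:10, 4:10] = True
reference = numpy.zeros((16, 16), dtype=bool); reference[5:11, 5:11] = True

footprint = generate_binary_structure(2, 1)
result_border = result ^ binary_erosion(result, structure=footprint)
reference_border = reference ^ binary_erosion(reference, structure=footprint)

dt = distance_transform_edt(~reference_border)   # distance to nearest reference-border voxel
print(dt[result_border].max())                   # directed Hausdorff distance, voxel units
```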
5,335 | loli/medpy | medpy/metric/histogram.py | __minowski_low_positive_integer_p | def __minowski_low_positive_integer_p(h1, h2, p = 2): # 11..43 us for p = 1..24 \w 100 bins
"""
A faster implementation of the Minowski distance for positive integer p < 25.
@note do not use this function directly; use the general @link minowski() method instead.
@note the passed histograms must be scipy arrays.
"""
mult = scipy.absolute(h1 - h2)
dif = mult
for _ in range(p - 1): dif = scipy.multiply(dif, mult)
return math.pow(scipy.sum(dif), 1./p) | python | def __minowski_low_positive_integer_p(h1, h2, p = 2): # 11..43 us for p = 1..24 \w 100 bins
"""
A faster implementation of the Minowski distance for positive integer p < 25.
@note do not use this function directly; use the general @link minowski() method instead.
@note the passed histograms must be scipy arrays.
"""
mult = scipy.absolute(h1 - h2)
dif = mult
for _ in range(p - 1): dif = scipy.multiply(dif, mult)
return math.pow(scipy.sum(dif), 1./p) | [
"def",
"__minowski_low_positive_integer_p",
"(",
"h1",
",",
"h2",
",",
"p",
"=",
"2",
")",
":",
"# 11..43 us for p = 1..24 \\w 100 bins",
"mult",
"=",
"scipy",
".",
"absolute",
"(",
"h1",
"-",
"h2",
")",
"dif",
"=",
"mult",
"for",
"_",
"in",
"range",
"(",
"p",
"-",
"1",
")",
":",
"dif",
"=",
"scipy",
".",
"multiply",
"(",
"dif",
",",
"mult",
")",
"return",
"math",
".",
"pow",
"(",
"scipy",
".",
"sum",
"(",
"dif",
")",
",",
"1.",
"/",
"p",
")"
] | A faster implementation of the Minowski distance for positive integer p < 25.
@note do not use this function directly; use the general @link minowski() method instead.
@note the passed histograms must be scipy arrays. | [
"A",
"faster",
"implementation",
"of",
"the",
"Minowski",
"distance",
"for",
"positive",
"integer",
"<",
"25",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L95-L104 |
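The repeated multiplication agrees with the direct power form; a quick numerical check in plain numpy:

```python
import math
import numpy

h1 = numpy.array([1.0, 3.0, 5.0])
h2 = numpy.array([2.0, 3.0, 1.0])
p = 3

diff = numpy.abs(h1 - h2)
acc = diff.copy()
for _ in range(p - 1):          # p - 1 extra multiplications, as above
    acc = acc * diff
fast = math.pow(acc.sum(), 1.0 / p)

direct = math.pow((diff ** p).sum(), 1.0 / p)
assert abs(fast - direct) < 1e-12
```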
5,336 | loli/medpy | medpy/metric/histogram.py | __kullback_leibler | def __kullback_leibler(h1, h2): # 36.3 us
"""
The actual KL implementation. @see kullback_leibler() for details.
Expects the histograms to be of type scipy.ndarray.
"""
result = h1.astype(scipy.float_)
mask = h1 != 0
result[mask] = scipy.multiply(h1[mask], scipy.log(h1[mask] / h2[mask]))
return scipy.sum(result) | python | def __kullback_leibler(h1, h2): # 36.3 us
"""
The actual KL implementation. @see kullback_leibler() for details.
Expects the histograms to be of type scipy.ndarray.
"""
result = h1.astype(scipy.float_)
mask = h1 != 0
result[mask] = scipy.multiply(h1[mask], scipy.log(h1[mask] / h2[mask]))
return scipy.sum(result) | [
"def",
"__kullback_leibler",
"(",
"h1",
",",
"h2",
")",
":",
"# 36.3 us",
"result",
"=",
"h1",
".",
"astype",
"(",
"scipy",
".",
"float_",
")",
"mask",
"=",
"h1",
"!=",
"0",
"result",
"[",
"mask",
"]",
"=",
"scipy",
".",
"multiply",
"(",
"h1",
"[",
"mask",
"]",
",",
"scipy",
".",
"log",
"(",
"h1",
"[",
"mask",
"]",
"/",
"h2",
"[",
"mask",
"]",
")",
")",
"return",
"scipy",
".",
"sum",
"(",
"result",
")"
] | The actual KL implementation. @see kullback_leibler() for details.
Expects the histograms to be of type scipy.ndarray. | [
"The",
"actual",
"KL",
"implementation",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L562-L570 |
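The h1 != 0 mask implements the 0 * log(0/q) = 0 convention; a trace in plain numpy (modern code would not rely on scipy's numpy re-exports):

```python
import numpy

h1 = numpy.array([0.5, 0.0, 0.5])
h2 = numpy.array([0.25, 0.25, 0.5])

result = h1.astype(float)
mask = h1 != 0                  # zero-probability bins contribute nothing
result[mask] = h1[mask] * numpy.log(h1[mask] / h2[mask])
print(result.sum())             # ~0.3466 nats: 0.5*ln(2) + 0 + 0.5*ln(1)
```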
5,337 | loli/medpy | medpy/metric/histogram.py | __prepare_histogram | def __prepare_histogram(h1, h2):
"""Convert the histograms to scipy.ndarrays if required."""
h1 = h1 if scipy.ndarray == type(h1) else scipy.asarray(h1)
h2 = h2 if scipy.ndarray == type(h2) else scipy.asarray(h2)
if h1.shape != h2.shape or h1.size != h2.size:
raise ValueError('h1 and h2 must be of same shape and size')
return h1, h2 | python | def __prepare_histogram(h1, h2):
"""Convert the histograms to scipy.ndarrays if required."""
h1 = h1 if scipy.ndarray == type(h1) else scipy.asarray(h1)
h2 = h2 if scipy.ndarray == type(h2) else scipy.asarray(h2)
if h1.shape != h2.shape or h1.size != h2.size:
raise ValueError('h1 and h2 must be of same shape and size')
return h1, h2 | [
"def",
"__prepare_histogram",
"(",
"h1",
",",
"h2",
")",
":",
"h1",
"=",
"h1",
"if",
"scipy",
".",
"ndarray",
"==",
"type",
"(",
"h1",
")",
"else",
"scipy",
".",
"asarray",
"(",
"h1",
")",
"h2",
"=",
"h2",
"if",
"scipy",
".",
"ndarray",
"==",
"type",
"(",
"h2",
")",
"else",
"scipy",
".",
"asarray",
"(",
"h2",
")",
"if",
"h1",
".",
"shape",
"!=",
"h2",
".",
"shape",
"or",
"h1",
".",
"size",
"!=",
"h2",
".",
"size",
":",
"raise",
"ValueError",
"(",
"'h1 and h2 must be of same shape and size'",
")",
"return",
"h1",
",",
"h2"
] | Convert the histograms to scipy.ndarrays if required. | [
"Convert",
"the",
"histograms",
"to",
"scipy",
".",
"ndarrays",
"if",
"required",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L1246-L1252 |
5,338 | minrk/findspark | findspark.py | find | def find():
"""Find a local spark installation.
Will first check the SPARK_HOME env variable, and otherwise
search common installation locations, e.g. from homebrew
"""
spark_home = os.environ.get('SPARK_HOME', None)
if not spark_home:
for path in [
'/usr/local/opt/apache-spark/libexec', # OS X Homebrew
'/usr/lib/spark/', # AWS Amazon EMR
'/usr/local/spark/', # common linux path for spark
'/opt/spark/', # other common linux path for spark
# Any other common places to look?
]:
if os.path.exists(path):
spark_home = path
break
if not spark_home:
raise ValueError("Couldn't find Spark, make sure SPARK_HOME env is set"
" or Spark is in an expected location (e.g. from homebrew installation).")
return spark_home | python | def find():
"""Find a local spark installation.
Will first check the SPARK_HOME env variable, and otherwise
search common installation locations, e.g. from homebrew
"""
spark_home = os.environ.get('SPARK_HOME', None)
if not spark_home:
for path in [
'/usr/local/opt/apache-spark/libexec', # OS X Homebrew
'/usr/lib/spark/', # AWS Amazon EMR
'/usr/local/spark/', # common linux path for spark
'/opt/spark/', # other common linux path for spark
# Any other common places to look?
]:
if os.path.exists(path):
spark_home = path
break
if not spark_home:
raise ValueError("Couldn't find Spark, make sure SPARK_HOME env is set"
" or Spark is in an expected location (e.g. from homebrew installation).")
return spark_home | [
"def",
"find",
"(",
")",
":",
"spark_home",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SPARK_HOME'",
",",
"None",
")",
"if",
"not",
"spark_home",
":",
"for",
"path",
"in",
"[",
"'/usr/local/opt/apache-spark/libexec'",
",",
"# OS X Homebrew",
"'/usr/lib/spark/'",
",",
"# AWS Amazon EMR",
"'/usr/local/spark/'",
",",
"# common linux path for spark",
"'/opt/spark/'",
",",
"# other common linux path for spark",
"# Any other common places to look?",
"]",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"spark_home",
"=",
"path",
"break",
"if",
"not",
"spark_home",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find Spark, make sure SPARK_HOME env is set\"",
"\" or Spark is in an expected location (e.g. from homebrew installation).\"",
")",
"return",
"spark_home"
] | Find a local spark installation.
Will first check the SPARK_HOME env variable, and otherwise
search common installation locations, e.g. from homebrew | [
"Find",
"a",
"local",
"spark",
"installation",
"."
] | 20c945d5136269ca56b1341786c49087faa7c75e | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L14-L38 |
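Usage sketch (assumes the findspark package is installed and some Spark installation exists on the machine):

```python
import findspark

print(findspark.find())   # e.g. '/usr/local/spark/'; raises ValueError if nothing is found
```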
5,339 | minrk/findspark | findspark.py | change_rc | def change_rc(spark_home, spark_python, py4j):
"""Persists changes to environment by changing shell config.
Adds lines to .bashrc to set environment variables
including adding dependencies to the system path. Will only
edit this file if it already exists. Currently only works for bash.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library.
"""
bashrc_location = os.path.expanduser("~/.bashrc")
if os.path.isfile(bashrc_location):
with open(bashrc_location, 'a') as bashrc:
bashrc.write("\n# Added by findspark\n")
bashrc.write("export SPARK_HOME=" + spark_home + "\n")
bashrc.write("export PYTHONPATH=" + spark_python + ":" +
py4j + ":$PYTHONPATH\n\n") | python | def change_rc(spark_home, spark_python, py4j):
"""Persists changes to environment by changing shell config.
Adds lines to .bashrc to set environment variables
including adding dependencies to the system path. Will only
edit this file if it already exists. Currently only works for bash.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library.
"""
bashrc_location = os.path.expanduser("~/.bashrc")
if os.path.isfile(bashrc_location):
with open(bashrc_location, 'a') as bashrc:
bashrc.write("\n# Added by findspark\n")
bashrc.write("export SPARK_HOME=" + spark_home + "\n")
bashrc.write("export PYTHONPATH=" + spark_python + ":" +
py4j + ":$PYTHONPATH\n\n") | [
"def",
"change_rc",
"(",
"spark_home",
",",
"spark_python",
",",
"py4j",
")",
":",
"bashrc_location",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.bashrc\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"bashrc_location",
")",
":",
"with",
"open",
"(",
"bashrc_location",
",",
"'a'",
")",
"as",
"bashrc",
":",
"bashrc",
".",
"write",
"(",
"\"\\n# Added by findspark\\n\"",
")",
"bashrc",
".",
"write",
"(",
"\"export SPARK_HOME=\"",
"+",
"spark_home",
"+",
"\"\\n\"",
")",
"bashrc",
".",
"write",
"(",
"\"export PYTHONPATH=\"",
"+",
"spark_python",
"+",
"\":\"",
"+",
"py4j",
"+",
"\":$PYTHONPATH\\n\\n\"",
")"
] | Persists changes to environment by changing shell config.
Adds lines to .bashrc to set environment variables
including adding dependencies to the system path. Will only
edit this file if it already exists. Currently only works for bash.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library. | [
"Persists",
"changes",
"to",
"environment",
"by",
"changing",
"shell",
"config",
"."
] | 20c945d5136269ca56b1341786c49087faa7c75e | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L41-L65 |
5,340 | minrk/findspark | findspark.py | edit_ipython_profile | def edit_ipython_profile(spark_home, spark_python, py4j):
"""Adds a startup file to the current IPython profile to import pyspark.
The startup file sets the required environment variables and imports pyspark.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library.
"""
from IPython import get_ipython
ip = get_ipython()
if ip:
profile_dir = ip.profile_dir.location
else:
from IPython.utils.path import locate_profile
profile_dir = locate_profile()
startup_file_loc = os.path.join(profile_dir, "startup", "findspark.py")
with open(startup_file_loc, 'w') as startup_file:
#Lines of code to be run when IPython starts
startup_file.write("import sys, os\n")
startup_file.write("os.environ['SPARK_HOME'] = '" + spark_home + "'\n")
startup_file.write("sys.path[:0] = " + str([spark_python, py4j]) + "\n")
startup_file.write("import pyspark\n") | python | def edit_ipython_profile(spark_home, spark_python, py4j):
"""Adds a startup file to the current IPython profile to import pyspark.
The startup file sets the required environment variables and imports pyspark.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library.
"""
from IPython import get_ipython
ip = get_ipython()
if ip:
profile_dir = ip.profile_dir.location
else:
from IPython.utils.path import locate_profile
profile_dir = locate_profile()
startup_file_loc = os.path.join(profile_dir, "startup", "findspark.py")
with open(startup_file_loc, 'w') as startup_file:
#Lines of code to be run when IPython starts
startup_file.write("import sys, os\n")
startup_file.write("os.environ['SPARK_HOME'] = '" + spark_home + "'\n")
startup_file.write("sys.path[:0] = " + str([spark_python, py4j]) + "\n")
startup_file.write("import pyspark\n") | [
"def",
"edit_ipython_profile",
"(",
"spark_home",
",",
"spark_python",
",",
"py4j",
")",
":",
"from",
"IPython",
"import",
"get_ipython",
"ip",
"=",
"get_ipython",
"(",
")",
"if",
"ip",
":",
"profile_dir",
"=",
"ip",
".",
"profile_dir",
".",
"location",
"else",
":",
"from",
"IPython",
".",
"utils",
".",
"path",
"import",
"locate_profile",
"profile_dir",
"=",
"locate_profile",
"(",
")",
"startup_file_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"profile_dir",
",",
"\"startup\"",
",",
"\"findspark.py\"",
")",
"with",
"open",
"(",
"startup_file_loc",
",",
"'w'",
")",
"as",
"startup_file",
":",
"#Lines of code to be run when IPython starts",
"startup_file",
".",
"write",
"(",
"\"import sys, os\\n\"",
")",
"startup_file",
".",
"write",
"(",
"\"os.environ['SPARK_HOME'] = '\"",
"+",
"spark_home",
"+",
"\"'\\n\"",
")",
"startup_file",
".",
"write",
"(",
"\"sys.path[:0] = \"",
"+",
"str",
"(",
"[",
"spark_python",
",",
"py4j",
"]",
")",
"+",
"\"\\n\"",
")",
"startup_file",
".",
"write",
"(",
"\"import pyspark\\n\"",
")"
] | Adds a startup file to the current IPython profile to import pyspark.
The startup file sets the required environment variables and imports pyspark.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library. | [
"Adds",
"a",
"startup",
"file",
"to",
"the",
"current",
"IPython",
"profile",
"to",
"import",
"pyspark",
"."
] | 20c945d5136269ca56b1341786c49087faa7c75e | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L68-L98 |
5,341 | minrk/findspark | findspark.py | init | def init(spark_home=None, python_path=None, edit_rc=False, edit_profile=False):
"""Make pyspark importable.
Sets environment variables and adds dependencies to sys.path.
If no Spark location is provided, will try to find an installation.
Parameters
----------
spark_home : str, optional, default = None
Path to Spark installation, will try to find automatically
if not provided.
python_path : str, optional, default = None
Path to Python for Spark workers (PYSPARK_PYTHON),
will use the currently running Python if not provided.
edit_rc : bool, optional, default = False
Whether to attempt to persist changes by appending to shell
config.
edit_profile : bool, optional, default = False
Whether to create an IPython startup file to automatically
configure and import pyspark.
"""
if not spark_home:
spark_home = find()
if not python_path:
python_path = os.environ.get('PYSPARK_PYTHON', sys.executable)
# ensure SPARK_HOME is defined
os.environ['SPARK_HOME'] = spark_home
# ensure PYSPARK_PYTHON is defined
os.environ['PYSPARK_PYTHON'] = python_path
if not os.environ.get("PYSPARK_SUBMIT_ARGS", None):
os.environ["PYSPARK_SUBMIT_ARGS"] = ''
# add pyspark to sys.path
spark_python = os.path.join(spark_home, 'python')
py4j = glob(os.path.join(spark_python, 'lib', 'py4j-*.zip'))[0]
sys.path[:0] = [spark_python, py4j]
if edit_rc:
change_rc(spark_home, spark_python, py4j)
if edit_profile:
edit_ipython_profile(spark_home, spark_python, py4j) | python | def init(spark_home=None, python_path=None, edit_rc=False, edit_profile=False):
"""Make pyspark importable.
Sets environment variables and adds dependencies to sys.path.
If no Spark location is provided, will try to find an installation.
Parameters
----------
spark_home : str, optional, default = None
Path to Spark installation, will try to find automatically
if not provided.
python_path : str, optional, default = None
Path to Python for Spark workers (PYSPARK_PYTHON),
will use the currently running Python if not provided.
edit_rc : bool, optional, default = False
Whether to attempt to persist changes by appending to shell
config.
edit_profile : bool, optional, default = False
Whether to create an IPython startup file to automatically
configure and import pyspark.
"""
if not spark_home:
spark_home = find()
if not python_path:
python_path = os.environ.get('PYSPARK_PYTHON', sys.executable)
# ensure SPARK_HOME is defined
os.environ['SPARK_HOME'] = spark_home
# ensure PYSPARK_PYTHON is defined
os.environ['PYSPARK_PYTHON'] = python_path
if not os.environ.get("PYSPARK_SUBMIT_ARGS", None):
os.environ["PYSPARK_SUBMIT_ARGS"] = ''
# add pyspark to sys.path
spark_python = os.path.join(spark_home, 'python')
py4j = glob(os.path.join(spark_python, 'lib', 'py4j-*.zip'))[0]
sys.path[:0] = [spark_python, py4j]
if edit_rc:
change_rc(spark_home, spark_python, py4j)
if edit_profile:
edit_ipython_profile(spark_home, spark_python, py4j) | [
"def",
"init",
"(",
"spark_home",
"=",
"None",
",",
"python_path",
"=",
"None",
",",
"edit_rc",
"=",
"False",
",",
"edit_profile",
"=",
"False",
")",
":",
"if",
"not",
"spark_home",
":",
"spark_home",
"=",
"find",
"(",
")",
"if",
"not",
"python_path",
":",
"python_path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PYSPARK_PYTHON'",
",",
"sys",
".",
"executable",
")",
"# ensure SPARK_HOME is defined",
"os",
".",
"environ",
"[",
"'SPARK_HOME'",
"]",
"=",
"spark_home",
"# ensure PYSPARK_PYTHON is defined",
"os",
".",
"environ",
"[",
"'PYSPARK_PYTHON'",
"]",
"=",
"python_path",
"if",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYSPARK_SUBMIT_ARGS\"",
",",
"None",
")",
":",
"os",
".",
"environ",
"[",
"\"PYSPARK_SUBMIT_ARGS\"",
"]",
"=",
"''",
"# add pyspark to sys.path",
"spark_python",
"=",
"os",
".",
"path",
".",
"join",
"(",
"spark_home",
",",
"'python'",
")",
"py4j",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"spark_python",
",",
"'lib'",
",",
"'py4j-*.zip'",
")",
")",
"[",
"0",
"]",
"sys",
".",
"path",
"[",
":",
"0",
"]",
"=",
"[",
"spark_python",
",",
"py4j",
"]",
"if",
"edit_rc",
":",
"change_rc",
"(",
"spark_home",
",",
"spark_python",
",",
"py4j",
")",
"if",
"edit_profile",
":",
"edit_ipython_profile",
"(",
"spark_home",
",",
"spark_python",
",",
"py4j",
")"
] | Make pyspark importable.
Sets environment variables and adds dependencies to sys.path.
If no Spark location is provided, will try to find an installation.
Parameters
----------
spark_home : str, optional, default = None
Path to Spark installation, will try to find automatically
if not provided.
python_path : str, optional, default = None
Path to Python for Spark workers (PYSPARK_PYTHON),
will use the currently running Python if not provided.
edit_rc : bool, optional, default = False
Whether to attempt to persist changes by appending to shell
config.
edit_profile : bool, optional, default = False
Whether to create an IPython startup file to automatically
configure and import pyspark. | [
"Make",
"pyspark",
"importable",
"."
] | 20c945d5136269ca56b1341786c49087faa7c75e | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L101-L147 |
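The canonical end-to-end usage (assumes a local Spark installation; the tiny RDD sum is just a smoke test):

```python
import findspark
findspark.init()                 # or findspark.init('/path/to/spark')

import pyspark                   # importable only after init()
sc = pyspark.SparkContext(appName='demo')
print(sc.parallelize(range(10)).sum())   # 45
sc.stop()
```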
5,342 | minrk/findspark | findspark.py | _add_to_submit_args | def _add_to_submit_args(s):
"""Adds string s to the PYSPARK_SUBMIT_ARGS env var"""
new_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "") + (" %s" % s)
os.environ["PYSPARK_SUBMIT_ARGS"] = new_args
return new_args | python | def _add_to_submit_args(s):
"""Adds string s to the PYSPARK_SUBMIT_ARGS env var"""
new_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "") + (" %s" % s)
os.environ["PYSPARK_SUBMIT_ARGS"] = new_args
return new_args | [
"def",
"_add_to_submit_args",
"(",
"s",
")",
":",
"new_args",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYSPARK_SUBMIT_ARGS\"",
",",
"\"\"",
")",
"+",
"(",
"\" %s\"",
"%",
"s",
")",
"os",
".",
"environ",
"[",
"\"PYSPARK_SUBMIT_ARGS\"",
"]",
"=",
"new_args",
"return",
"new_args"
] | Adds string s to the PYSPARK_SUBMIT_ARGS env var | [
"Adds",
"string",
"s",
"to",
"the",
"PYSPARK_SUBMIT_ARGS",
"env",
"var"
] | 20c945d5136269ca56b1341786c49087faa7c75e | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L149-L153 |
5,343 | minrk/findspark | findspark.py | add_packages | def add_packages(packages):
"""Add external packages to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
packages: list of package names in string format
"""
#if the parameter is a string, convert to a single element list
if isinstance(packages,str):
packages = [packages]
_add_to_submit_args("--packages "+ ",".join(packages) +" pyspark-shell") | python | def add_packages(packages):
"""Add external packages to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
packages: list of package names in string format
"""
#if the parameter is a string, convert to a single element list
if isinstance(packages,str):
packages = [packages]
_add_to_submit_args("--packages "+ ",".join(packages) +" pyspark-shell") | [
"def",
"add_packages",
"(",
"packages",
")",
":",
"#if the parameter is a string, convert to a single element list",
"if",
"isinstance",
"(",
"packages",
",",
"str",
")",
":",
"packages",
"=",
"[",
"packages",
"]",
"_add_to_submit_args",
"(",
"\"--packages \"",
"+",
"\",\"",
".",
"join",
"(",
"packages",
")",
"+",
"\" pyspark-shell\"",
")"
] | Add external packages to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
packages: list of package names in string format | [
"Add",
"external",
"packages",
"to",
"the",
"pyspark",
"interpreter",
"."
] | 20c945d5136269ca56b1341786c49087faa7c75e | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L155-L169 |
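Packages must be registered before the JVM starts, i.e. before a SparkContext is created; a usage sketch with a real Maven coordinate (version chosen for illustration):

```python
import os
import findspark

findspark.add_packages('org.apache.spark:spark-avro_2.12:3.1.2')  # illustrative version
findspark.init()
print(os.environ['PYSPARK_SUBMIT_ARGS'])   # e.g. '--packages org.apache.spark:... pyspark-shell'
```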
5,344 | minrk/findspark | findspark.py | add_jars | def add_jars(jars):
"""Add external jars to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
jars: list of path to jars in string format
"""
#if the parameter is a string, convert to a single element list
if isinstance(jars,str):
jars = [jars]
_add_to_submit_args("--jars "+ ",".join(jars) +" pyspark-shell") | python | def add_jars(jars):
"""Add external jars to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
jars: list of path to jars in string format
"""
#if the parameter is a string, convert to a single element list
if isinstance(jars,str):
jars = [jars]
_add_to_submit_args("--jars "+ ",".join(jars) +" pyspark-shell") | [
"def",
"add_jars",
"(",
"jars",
")",
":",
"#if the parameter is a string, convert to a single element list",
"if",
"isinstance",
"(",
"jars",
",",
"str",
")",
":",
"jars",
"=",
"[",
"jars",
"]",
"_add_to_submit_args",
"(",
"\"--jars \"",
"+",
"\",\"",
".",
"join",
"(",
"jars",
")",
"+",
"\" pyspark-shell\"",
")"
] | Add external jars to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
jars: list of path to jars in string format | [
"Add",
"external",
"jars",
"to",
"the",
"pyspark",
"interpreter",
"."
] | 20c945d5136269ca56b1341786c49087faa7c75e | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L171-L185 |
5,345 | sdispater/cleo | cleo/parser.py | Parser.parse | def parse(cls, expression):
"""
Parse the given console command definition into a dict.
:param expression: The expression to parse
:type expression: str
:rtype: dict
"""
parsed = {"name": None, "arguments": [], "options": []}
if not expression.strip():
raise ValueError("Console command signature is empty.")
expression = expression.replace(os.linesep, "")
matches = re.match(r"[^\s]+", expression)
if not matches:
raise ValueError("Unable to determine command name from signature.")
name = matches.group(0)
parsed["name"] = name
tokens = re.findall(r"\{\s*(.*?)\s*\}", expression)
if tokens:
parsed.update(cls._parameters(tokens))
return parsed | python | def parse(cls, expression):
"""
Parse the given console command definition into a dict.
:param expression: The expression to parse
:type expression: str
:rtype: dict
"""
parsed = {"name": None, "arguments": [], "options": []}
if not expression.strip():
raise ValueError("Console command signature is empty.")
expression = expression.replace(os.linesep, "")
matches = re.match(r"[^\s]+", expression)
if not matches:
raise ValueError("Unable to determine command name from signature.")
name = matches.group(0)
parsed["name"] = name
tokens = re.findall(r"\{\s*(.*?)\s*\}", expression)
if tokens:
parsed.update(cls._parameters(tokens))
return parsed | [
"def",
"parse",
"(",
"cls",
",",
"expression",
")",
":",
"parsed",
"=",
"{",
"\"name\"",
":",
"None",
",",
"\"arguments\"",
":",
"[",
"]",
",",
"\"options\"",
":",
"[",
"]",
"}",
"if",
"not",
"expression",
".",
"strip",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Console command signature is empty.\"",
")",
"expression",
"=",
"expression",
".",
"replace",
"(",
"os",
".",
"linesep",
",",
"\"\"",
")",
"matches",
"=",
"re",
".",
"match",
"(",
"r\"[^\\s]+\"",
",",
"expression",
")",
"if",
"not",
"matches",
":",
"raise",
"ValueError",
"(",
"\"Unable to determine command name from signature.\"",
")",
"name",
"=",
"matches",
".",
"group",
"(",
"0",
")",
"parsed",
"[",
"\"name\"",
"]",
"=",
"name",
"tokens",
"=",
"re",
".",
"findall",
"(",
"r\"\\{\\s*(.*?)\\s*\\}\"",
",",
"expression",
")",
"if",
"tokens",
":",
"parsed",
".",
"update",
"(",
"cls",
".",
"_parameters",
"(",
"tokens",
")",
")",
"return",
"parsed"
] | Parse the given console command definition into a dict.
:param expression: The expression to parse
:type expression: str
:rtype: dict | [
"Parse",
"the",
"given",
"console",
"command",
"definition",
"into",
"a",
"dict",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/parser.py#L16-L45 |
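A quick sketch of feeding Parser.parse a fluent signature; the keys of the returned dict follow directly from the code above, while the element shapes come from the private helpers in the next records:

    from cleo.parser import Parser

    definition = Parser.parse(
        "greet {name : Who do you want to greet?} {--y|yell : Uppercase the output}"
    )
    assert definition["name"] == "greet"
    # definition["arguments"] holds the parsed {name ...} token,
    # definition["options"] the parsed {--y|yell ...} token.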
5,346 | sdispater/cleo | cleo/parser.py | Parser._parameters | def _parameters(cls, tokens):
"""
Extract all of the parameters from the tokens.
:param tokens: The tokens to extract the parameters from
:type tokens: list
:rtype: dict
"""
arguments = []
options = []
for token in tokens:
if not token.startswith("--"):
arguments.append(cls._parse_argument(token))
else:
options.append(cls._parse_option(token))
return {"arguments": arguments, "options": options} | python | def _parameters(cls, tokens):
"""
Extract all of the parameters from the tokens.
:param tokens: The tokens to extract the parameters from
:type tokens: list
:rtype: dict
"""
arguments = []
options = []
for token in tokens:
if not token.startswith("--"):
arguments.append(cls._parse_argument(token))
else:
options.append(cls._parse_option(token))
return {"arguments": arguments, "options": options} | [
"def",
"_parameters",
"(",
"cls",
",",
"tokens",
")",
":",
"arguments",
"=",
"[",
"]",
"options",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"if",
"not",
"token",
".",
"startswith",
"(",
"\"--\"",
")",
":",
"arguments",
".",
"append",
"(",
"cls",
".",
"_parse_argument",
"(",
"token",
")",
")",
"else",
":",
"options",
".",
"append",
"(",
"cls",
".",
"_parse_option",
"(",
"token",
")",
")",
"return",
"{",
"\"arguments\"",
":",
"arguments",
",",
"\"options\"",
":",
"options",
"}"
] | Extract all of the parameters from the tokens.
:param tokens: The tokens to extract the parameters from
:type tokens: list
:rtype: dict | [
"Extract",
"all",
"of",
"the",
"parameters",
"from",
"the",
"tokens",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/parser.py#L48-L66 |
5,347 | sdispater/cleo | cleo/parser.py | Parser._parse_argument | def _parse_argument(cls, token):
"""
Parse an argument expression.
:param token: The argument expression
:type token: str
:rtype: InputArgument
"""
description = ""
validator = None
if " : " in token:
token, description = tuple(token.split(" : ", 2))
token = token.strip()
description = description.strip()
# Checking validator:
matches = re.match(r"(.*)\((.*?)\)", token)
if matches:
token = matches.group(1).strip()
validator = matches.group(2).strip()
if token.endswith("?*"):
return _argument(
token.rstrip("?*"),
Argument.MULTI_VALUED | Argument.OPTIONAL,
description,
None,
)
elif token.endswith("*"):
return _argument(
token.rstrip("*"),
Argument.MULTI_VALUED | Argument.REQUIRED,
description,
None,
)
elif token.endswith("?"):
return _argument(token.rstrip("?"), Argument.OPTIONAL, description, None)
matches = re.match(r"(.+)=(.+)", token)
if matches:
return _argument(
matches.group(1), Argument.OPTIONAL, description, matches.group(2)
)
return _argument(token, Argument.REQUIRED, description, None) | python | def _parse_argument(cls, token):
"""
Parse an argument expression.
:param token: The argument expression
:type token: str
:rtype: InputArgument
"""
description = ""
validator = None
if " : " in token:
token, description = tuple(token.split(" : ", 2))
token = token.strip()
description = description.strip()
# Checking validator:
matches = re.match(r"(.*)\((.*?)\)", token)
if matches:
token = matches.group(1).strip()
validator = matches.group(2).strip()
if token.endswith("?*"):
return _argument(
token.rstrip("?*"),
Argument.MULTI_VALUED | Argument.OPTIONAL,
description,
None,
)
elif token.endswith("*"):
return _argument(
token.rstrip("*"),
Argument.MULTI_VALUED | Argument.REQUIRED,
description,
None,
)
elif token.endswith("?"):
return _argument(token.rstrip("?"), Argument.OPTIONAL, description, None)
matches = re.match(r"(.+)=(.+)", token)
if matches:
return _argument(
matches.group(1), Argument.OPTIONAL, description, matches.group(2)
)
return _argument(token, Argument.REQUIRED, description, None) | [
"def",
"_parse_argument",
"(",
"cls",
",",
"token",
")",
":",
"description",
"=",
"\"\"",
"validator",
"=",
"None",
"if",
"\" : \"",
"in",
"token",
":",
"token",
",",
"description",
"=",
"tuple",
"(",
"token",
".",
"split",
"(",
"\" : \"",
",",
"2",
")",
")",
"token",
"=",
"token",
".",
"strip",
"(",
")",
"description",
"=",
"description",
".",
"strip",
"(",
")",
"# Checking validator:",
"matches",
"=",
"re",
".",
"match",
"(",
"r\"(.*)\\((.*?)\\)\"",
",",
"token",
")",
"if",
"matches",
":",
"token",
"=",
"matches",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"validator",
"=",
"matches",
".",
"group",
"(",
"2",
")",
".",
"strip",
"(",
")",
"if",
"token",
".",
"endswith",
"(",
"\"?*\"",
")",
":",
"return",
"_argument",
"(",
"token",
".",
"rstrip",
"(",
"\"?*\"",
")",
",",
"Argument",
".",
"MULTI_VALUED",
"|",
"Argument",
".",
"OPTIONAL",
",",
"description",
",",
"None",
",",
")",
"elif",
"token",
".",
"endswith",
"(",
"\"*\"",
")",
":",
"return",
"_argument",
"(",
"token",
".",
"rstrip",
"(",
"\"*\"",
")",
",",
"Argument",
".",
"MULTI_VALUED",
"|",
"Argument",
".",
"REQUIRED",
",",
"description",
",",
"None",
",",
")",
"elif",
"token",
".",
"endswith",
"(",
"\"?\"",
")",
":",
"return",
"_argument",
"(",
"token",
".",
"rstrip",
"(",
"\"?\"",
")",
",",
"Argument",
".",
"OPTIONAL",
",",
"description",
",",
"None",
")",
"matches",
"=",
"re",
".",
"match",
"(",
"r\"(.+)=(.+)\"",
",",
"token",
")",
"if",
"matches",
":",
"return",
"_argument",
"(",
"matches",
".",
"group",
"(",
"1",
")",
",",
"Argument",
".",
"OPTIONAL",
",",
"description",
",",
"matches",
".",
"group",
"(",
"2",
")",
")",
"return",
"_argument",
"(",
"token",
",",
"Argument",
".",
"REQUIRED",
",",
"description",
",",
"None",
")"
] | Parse an argument expression.
:param token: The argument expression
:type token: str
:rtype: InputArgument | [
"Parse",
"an",
"argument",
"expression",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/parser.py#L69-L117 |
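The suffix grammar _parse_argument implements, summarized as a sketch (it exercises a private method, so this is for illustration only):

    # "user"       -> required argument
    # "user?"      -> optional argument
    # "user*"      -> required, multi-valued; "user?*" -> optional, multi-valued
    # "user=john"  -> optional argument with default "john"
    arg = Parser._parse_argument("user?")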
5,348 | sdispater/cleo | cleo/parser.py | Parser._parse_option | def _parse_option(cls, token):
"""
Parse an option expression.
:param token: The option expression
:type token: str
:rtype: InputOption
"""
description = ""
validator = None
if " : " in token:
token, description = tuple(token.split(" : ", 2))
token = token.strip()
description = description.strip()
# Checking validator:
matches = re.match(r"(.*)\((.*?)\)", token)
if matches:
token = matches.group(1).strip()
validator = matches.group(2).strip()
shortcut = None
matches = re.split(r"\s*\|\s*", token, 2)
if len(matches) > 1:
shortcut = matches[0].lstrip("-")
token = matches[1]
else:
token = token.lstrip("-")
default = None
mode = Option.NO_VALUE
if token.endswith("=*"):
mode = Option.MULTI_VALUED
token = token.rstrip("=*")
elif token.endswith("=?*"):
mode = Option.MULTI_VALUED
token = token.rstrip("=?*")
elif token.endswith("=?"):
mode = Option.OPTIONAL_VALUE
token = token.rstrip("=?")
elif token.endswith("="):
mode = Option.REQUIRED_VALUE
token = token.rstrip("=")
matches = re.match(r"(.+)(=[?*]*)(.+)", token)
if matches:
token = matches.group(1)
operator = matches.group(2)
default = matches.group(3)
if operator == "=*":
mode = Option.REQUIRED_VALUE | Option.MULTI_VALUED
elif operator == "=?*":
mode = Option.MULTI_VALUED
elif operator == "=?":
mode = Option.OPTIONAL_VALUE
elif operator == "=":
mode = Option.REQUIRED_VALUE
return _option(token, shortcut, mode, description, default) | python | def _parse_option(cls, token):
"""
Parse an option expression.
:param token: The option expression
:type token: str
:rtype: InputOption
"""
description = ""
validator = None
if " : " in token:
token, description = tuple(token.split(" : ", 2))
token = token.strip()
description = description.strip()
# Checking validator:
matches = re.match(r"(.*)\((.*?)\)", token)
if matches:
token = matches.group(1).strip()
validator = matches.group(2).strip()
shortcut = None
matches = re.split(r"\s*\|\s*", token, 2)
if len(matches) > 1:
shortcut = matches[0].lstrip("-")
token = matches[1]
else:
token = token.lstrip("-")
default = None
mode = Option.NO_VALUE
if token.endswith("=*"):
mode = Option.MULTI_VALUED
token = token.rstrip("=*")
elif token.endswith("=?*"):
mode = Option.MULTI_VALUED
token = token.rstrip("=?*")
elif token.endswith("=?"):
mode = Option.OPTIONAL_VALUE
token = token.rstrip("=?")
elif token.endswith("="):
mode = Option.REQUIRED_VALUE
token = token.rstrip("=")
matches = re.match(r"(.+)(=[?*]*)(.+)", token)
if matches:
token = matches.group(1)
operator = matches.group(2)
default = matches.group(3)
if operator == "=*":
mode = Option.REQUIRED_VALUE | Option.MULTI_VALUED
elif operator == "=?*":
mode = Option.MULTI_VALUED
elif operator == "=?":
mode = Option.OPTIONAL_VALUE
elif operator == "=":
mode = Option.REQUIRED_VALUE
return _option(token, shortcut, mode, description, default) | [
"def",
"_parse_option",
"(",
"cls",
",",
"token",
")",
":",
"description",
"=",
"\"\"",
"validator",
"=",
"None",
"if",
"\" : \"",
"in",
"token",
":",
"token",
",",
"description",
"=",
"tuple",
"(",
"token",
".",
"split",
"(",
"\" : \"",
",",
"2",
")",
")",
"token",
"=",
"token",
".",
"strip",
"(",
")",
"description",
"=",
"description",
".",
"strip",
"(",
")",
"# Checking validator:",
"matches",
"=",
"re",
".",
"match",
"(",
"r\"(.*)\\((.*?)\\)\"",
",",
"token",
")",
"if",
"matches",
":",
"token",
"=",
"matches",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"validator",
"=",
"matches",
".",
"group",
"(",
"2",
")",
".",
"strip",
"(",
")",
"shortcut",
"=",
"None",
"matches",
"=",
"re",
".",
"split",
"(",
"r\"\\s*\\|\\s*\"",
",",
"token",
",",
"2",
")",
"if",
"len",
"(",
"matches",
")",
">",
"1",
":",
"shortcut",
"=",
"matches",
"[",
"0",
"]",
".",
"lstrip",
"(",
"\"-\"",
")",
"token",
"=",
"matches",
"[",
"1",
"]",
"else",
":",
"token",
"=",
"token",
".",
"lstrip",
"(",
"\"-\"",
")",
"default",
"=",
"None",
"mode",
"=",
"Option",
".",
"NO_VALUE",
"if",
"token",
".",
"endswith",
"(",
"\"=*\"",
")",
":",
"mode",
"=",
"Option",
".",
"MULTI_VALUED",
"token",
"=",
"token",
".",
"rstrip",
"(",
"\"=*\"",
")",
"elif",
"token",
".",
"endswith",
"(",
"\"=?*\"",
")",
":",
"mode",
"=",
"Option",
".",
"MULTI_VALUED",
"token",
"=",
"token",
".",
"rstrip",
"(",
"\"=?*\"",
")",
"elif",
"token",
".",
"endswith",
"(",
"\"=?\"",
")",
":",
"mode",
"=",
"Option",
".",
"OPTIONAL_VALUE",
"token",
"=",
"token",
".",
"rstrip",
"(",
"\"=?\"",
")",
"elif",
"token",
".",
"endswith",
"(",
"\"=\"",
")",
":",
"mode",
"=",
"Option",
".",
"REQUIRED_VALUE",
"token",
"=",
"token",
".",
"rstrip",
"(",
"\"=\"",
")",
"matches",
"=",
"re",
".",
"match",
"(",
"r\"(.+)(=[?*]*)(.+)\"",
",",
"token",
")",
"if",
"matches",
":",
"token",
"=",
"matches",
".",
"group",
"(",
"1",
")",
"operator",
"=",
"matches",
".",
"group",
"(",
"2",
")",
"default",
"=",
"matches",
".",
"group",
"(",
"3",
")",
"if",
"operator",
"==",
"\"=*\"",
":",
"mode",
"=",
"Option",
".",
"REQUIRED_VALUE",
"|",
"Option",
".",
"MULTI_VALUED",
"elif",
"operator",
"==",
"\"=?*\"",
":",
"mode",
"=",
"Option",
".",
"MULTI_VALUED",
"elif",
"operator",
"==",
"\"=?\"",
":",
"mode",
"=",
"Option",
".",
"OPTIONAL_VALUE",
"elif",
"operator",
"==",
"\"=\"",
":",
"mode",
"=",
"Option",
".",
"REQUIRED_VALUE",
"return",
"_option",
"(",
"token",
",",
"shortcut",
",",
"mode",
",",
"description",
",",
"default",
")"
] | Parse an option expression.
:param token: The option expression
:type token: str
:rtype: InputOption | [
"Parse",
"an",
"option",
"expression",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/parser.py#L120-L186 |
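The matching option grammar, again exercising a private method purely for illustration:

    # "--force"     -> flag (Option.NO_VALUE)
    # "--queue="    -> value required
    # "--queue=?"   -> value optional
    # "--queue=*"   -> multi-valued
    # "--q|queue="  -> shortcut "q" plus long name "queue", value required
    opt = Parser._parse_option("--q|queue=")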
5,349 | sdispater/cleo | cleo/application.py | Application.add | def add(self, command): # type: (BaseCommand) -> Application
"""
Adds a command object.
"""
self.add_command(command.config)
command.set_application(self)
return self | python | def add(self, command): # type: (BaseCommand) -> Application
"""
Adds a command object.
"""
self.add_command(command.config)
command.set_application(self)
return self | [
"def",
"add",
"(",
"self",
",",
"command",
")",
":",
"# type: (BaseCommand) -> Application",
"self",
".",
"add_command",
"(",
"command",
".",
"config",
")",
"command",
".",
"set_application",
"(",
"self",
")",
"return",
"self"
] | Adds a command object. | [
"Adds",
"a",
"command",
"object",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/application.py#L38-L45 |
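A hedged sketch of wiring a command into an application; GreetCommand is the hypothetical Command subclass sketched under record 5,350 below, and the Application constructor arguments are assumptions:

    from cleo.application import Application

    app = Application("demo", "0.1.0")  # name/version arguments are assumptions
    app.add(GreetCommand())             # hypothetical Command subclass
    app.run()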
5,350 | sdispater/cleo | cleo/commands/command.py | Command._configure_using_fluent_definition | def _configure_using_fluent_definition(self):
"""
Configure the console command using a fluent definition.
"""
definition = Parser.parse(self.signature)
self._config.set_name(definition["name"])
for name, flags, description, default in definition["arguments"]:
self._config.add_argument(name, flags, description, default)
for long_name, short_name, flags, description, default in definition["options"]:
self._config.add_option(long_name, short_name, flags, description, default) | python | def _configure_using_fluent_definition(self):
"""
Configure the console command using a fluent definition.
"""
definition = Parser.parse(self.signature)
self._config.set_name(definition["name"])
for name, flags, description, default in definition["arguments"]:
self._config.add_argument(name, flags, description, default)
for long_name, short_name, flags, description, default in definition["options"]:
self._config.add_option(long_name, short_name, flags, description, default) | [
"def",
"_configure_using_fluent_definition",
"(",
"self",
")",
":",
"definition",
"=",
"Parser",
".",
"parse",
"(",
"self",
".",
"signature",
")",
"self",
".",
"_config",
".",
"set_name",
"(",
"definition",
"[",
"\"name\"",
"]",
")",
"for",
"name",
",",
"flags",
",",
"description",
",",
"default",
"in",
"definition",
"[",
"\"arguments\"",
"]",
":",
"self",
".",
"_config",
".",
"add_argument",
"(",
"name",
",",
"flags",
",",
"description",
",",
"default",
")",
"for",
"long_name",
",",
"short_name",
",",
"flags",
",",
"description",
",",
"default",
"in",
"definition",
"[",
"\"options\"",
"]",
":",
"self",
".",
"_config",
".",
"add_option",
"(",
"long_name",
",",
"short_name",
",",
"flags",
",",
"description",
",",
"default",
")"
] | Configure the console command using a fluent definition. | [
"Configure",
"the",
"console",
"command",
"using",
"a",
"fluent",
"definition",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L71-L83 |
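A sketch of the fluent style this method enables, assuming `signature` is exposed as a class attribute (as the attribute access above suggests); the signature string uses the Parser grammar from records 5,345-5,348:

    from cleo.commands.command import Command

    class GreetCommand(Command):

        signature = "greet {name : Who do you want to greet?} {--y|yell : Uppercase the output}"

        def handle(self):
            text = "Hello %s" % self.argument("name")
            if self.option("yell"):
                text = text.upper()
            self.line(text)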
5,351 | sdispater/cleo | cleo/commands/command.py | Command.argument | def argument(self, key=None):
"""
Get the value of a command argument.
"""
if key is None:
return self._args.arguments()
return self._args.argument(key) | python | def argument(self, key=None):
"""
Get the value of a command argument.
"""
if key is None:
return self._args.arguments()
return self._args.argument(key) | [
"def",
"argument",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
"is",
"None",
":",
"return",
"self",
".",
"_args",
".",
"arguments",
"(",
")",
"return",
"self",
".",
"_args",
".",
"argument",
"(",
"key",
")"
] | Get the value of a command argument. | [
"Get",
"the",
"value",
"of",
"a",
"command",
"argument",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L124-L131 |
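Typical calls from inside a handle() implementation; the shape returned when the key is omitted comes from the Args API, which is not shown here:

    name = self.argument("name")  # single argument value
    all_args = self.argument()    # key omitted: everything from self._args.arguments()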
5,352 | sdispater/cleo | cleo/commands/command.py | Command.option | def option(self, key=None):
"""
Get the value of a command option.
"""
if key is None:
return self._args.options()
return self._args.option(key) | python | def option(self, key=None):
"""
Get the value of a command option.
"""
if key is None:
return self._args.options()
return self._args.option(key) | [
"def",
"option",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
"is",
"None",
":",
"return",
"self",
".",
"_args",
".",
"options",
"(",
")",
"return",
"self",
".",
"_args",
".",
"option",
"(",
"key",
")"
] | Get the value of a command option. | [
"Get",
"the",
"value",
"of",
"a",
"command",
"option",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L133-L140 |
5,353 | sdispater/cleo | cleo/commands/command.py | Command.confirm | def confirm(self, question, default=False, true_answer_regex="(?i)^y"):
"""
Confirm a question with the user.
"""
return self._io.confirm(question, default, true_answer_regex) | python | def confirm(self, question, default=False, true_answer_regex="(?i)^y"):
"""
Confirm a question with the user.
"""
return self._io.confirm(question, default, true_answer_regex) | [
"def",
"confirm",
"(",
"self",
",",
"question",
",",
"default",
"=",
"False",
",",
"true_answer_regex",
"=",
"\"(?i)^y\"",
")",
":",
"return",
"self",
".",
"_io",
".",
"confirm",
"(",
"question",
",",
"default",
",",
"true_answer_regex",
")"
] | Confirm a question with the user. | [
"Confirm",
"a",
"question",
"with",
"the",
"user",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L142-L146 |
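A one-line guard built on confirm above; any answer matching true_answer_regex counts as "yes":

    if self.confirm("Delete all build artifacts?", default=False):
        ...  # destructive work goes here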
5,354 | sdispater/cleo | cleo/commands/command.py | Command.ask | def ask(self, question, default=None):
"""
Prompt the user for input.
"""
if isinstance(question, Question):
return self._io.ask_question(question)
return self._io.ask(question, default) | python | def ask(self, question, default=None):
"""
Prompt the user for input.
"""
if isinstance(question, Question):
return self._io.ask_question(question)
return self._io.ask(question, default) | [
"def",
"ask",
"(",
"self",
",",
"question",
",",
"default",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"question",
",",
"Question",
")",
":",
"return",
"self",
".",
"_io",
".",
"ask_question",
"(",
"question",
")",
"return",
"self",
".",
"_io",
".",
"ask",
"(",
"question",
",",
"default",
")"
] | Prompt the user for input. | [
"Prompt",
"the",
"user",
"for",
"input",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L148-L155 |
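ask accepts either a bare string or a preconfigured Question object, which it routes to ask_question:

    name = self.ask("What is your name?", default="guest")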
5,355 | sdispater/cleo | cleo/commands/command.py | Command.choice | def choice(self, question, choices, default=None, attempts=None, multiple=False):
"""
Give the user a single choice from a list of answers.
"""
question = ChoiceQuestion(question, choices, default)
question.set_max_attempts(attempts)
question.set_multi_select(multiple)
return self._io.ask_question(question) | python | def choice(self, question, choices, default=None, attempts=None, multiple=False):
"""
Give the user a single choice from a list of answers.
"""
question = ChoiceQuestion(question, choices, default)
question.set_max_attempts(attempts)
question.set_multi_select(multiple)
return self._io.ask_question(question) | [
"def",
"choice",
"(",
"self",
",",
"question",
",",
"choices",
",",
"default",
"=",
"None",
",",
"attempts",
"=",
"None",
",",
"multiple",
"=",
"False",
")",
":",
"question",
"=",
"ChoiceQuestion",
"(",
"question",
",",
"choices",
",",
"default",
")",
"question",
".",
"set_max_attempts",
"(",
"attempts",
")",
"question",
".",
"set_multi_select",
"(",
"multiple",
")",
"return",
"self",
".",
"_io",
".",
"ask_question",
"(",
"question",
")"
] | Give the user a single choice from a list of answers. | [
"Give",
"the",
"user",
"a",
"single",
"choice",
"from",
"an",
"list",
"of",
"answers",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L163-L172 |
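Sketches for both single- and multi-select prompts; with multiple=True the result is the selected subset (the exact container type comes from ChoiceQuestion, not shown here):

    color = self.choice("Pick a color", ["red", "green", "blue"])
    colors = self.choice("Pick several", ["red", "green", "blue"], multiple=True)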
5,356 | sdispater/cleo | cleo/commands/command.py | Command.create_question | def create_question(self, question, type=None, **kwargs):
"""
Returns a Question of specified type.
"""
if not type:
return Question(question, **kwargs)
if type == "choice":
return ChoiceQuestion(question, **kwargs)
if type == "confirmation":
return ConfirmationQuestion(question, **kwargs) | python | def create_question(self, question, type=None, **kwargs):
"""
Returns a Question of specified type.
"""
if not type:
return Question(question, **kwargs)
if type == "choice":
return ChoiceQuestion(question, **kwargs)
if type == "confirmation":
return ConfirmationQuestion(question, **kwargs) | [
"def",
"create_question",
"(",
"self",
",",
"question",
",",
"type",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"type",
":",
"return",
"Question",
"(",
"question",
",",
"*",
"*",
"kwargs",
")",
"if",
"type",
"==",
"\"choice\"",
":",
"return",
"ChoiceQuestion",
"(",
"question",
",",
"*",
"*",
"kwargs",
")",
"if",
"type",
"==",
"\"confirmation\"",
":",
"return",
"ConfirmationQuestion",
"(",
"question",
",",
"*",
"*",
"kwargs",
")"
] | Returns a Question of specified type. | [
"Returns",
"a",
"Question",
"of",
"specified",
"type",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L174-L185 |
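Building a question up front and handing it to ask; kwargs are forwarded to the chosen Question class, so which keywords are legal depends on that class (an assumption here):

    q = self.create_question("Really continue?", type="confirmation")
    answer = self.ask(q)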
5,357 | sdispater/cleo | cleo/commands/command.py | Command.table | def table(self, header=None, rows=None, style=None):
"""
Return a Table instance.
"""
if style is not None:
style = self.TABLE_STYLES[style]
table = Table(style)
if header:
table.set_header_row(header)
if rows:
table.set_rows(rows)
return table | python | def table(self, header=None, rows=None, style=None):
"""
Return a Table instance.
"""
if style is not None:
style = self.TABLE_STYLES[style]
table = Table(style)
if header:
table.set_header_row(header)
if rows:
table.set_rows(rows)
return table | [
"def",
"table",
"(",
"self",
",",
"header",
"=",
"None",
",",
"rows",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"if",
"style",
"is",
"not",
"None",
":",
"style",
"=",
"self",
".",
"TABLE_STYLES",
"[",
"style",
"]",
"table",
"=",
"Table",
"(",
"style",
")",
"if",
"header",
":",
"table",
".",
"set_header_row",
"(",
"header",
")",
"if",
"rows",
":",
"table",
".",
"set_rows",
"(",
"rows",
")",
"return",
"table"
] | Return a Table instance. | [
"Return",
"a",
"Table",
"instance",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L187-L202 |
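A small table sketch; the style argument, when given, must be a key of Command.TABLE_STYLES, whose contents are not shown here:

    table = self.table(
        header=["Package", "Version"],              # versions below are illustrative
        rows=[["cleo", "0.7"], ["clikit", "0.3"]],
    )
    table.render(self._io)  # or go through render_table, defined in the next record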
5,358 | sdispater/cleo | cleo/commands/command.py | Command.render_table | def render_table(self, headers, rows, style=None):
"""
Format input to textual table.
"""
table = self.table(headers, rows, style)
table.render(self._io) | python | def render_table(self, headers, rows, style=None):
"""
Format input to textual table.
"""
table = self.table(headers, rows, style)
table.render(self._io) | [
"def",
"render_table",
"(",
"self",
",",
"headers",
",",
"rows",
",",
"style",
"=",
"None",
")",
":",
"table",
"=",
"self",
".",
"table",
"(",
"headers",
",",
"rows",
",",
"style",
")",
"table",
".",
"render",
"(",
"self",
".",
"_io",
")"
] | Format input to textual table. | [
"Format",
"input",
"to",
"textual",
"table",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L204-L210 |
5,359 | sdispater/cleo | cleo/commands/command.py | Command.line | def line(self, text, style=None, verbosity=None):
"""
Write a string as information output.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write_line(styled, verbosity) | python | def line(self, text, style=None, verbosity=None):
"""
Write a string as information output.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write_line(styled, verbosity) | [
"def",
"line",
"(",
"self",
",",
"text",
",",
"style",
"=",
"None",
",",
"verbosity",
"=",
"None",
")",
":",
"if",
"style",
":",
"styled",
"=",
"\"<%s>%s</>\"",
"%",
"(",
"style",
",",
"text",
")",
"else",
":",
"styled",
"=",
"text",
"self",
".",
"_io",
".",
"write_line",
"(",
"styled",
",",
"verbosity",
")"
] | Write a string as information output. | [
"Write",
"a",
"string",
"as",
"information",
"output",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L224-L233 |
5,360 | sdispater/cleo | cleo/commands/command.py | Command.line_error | def line_error(self, text, style=None, verbosity=None):
"""
Write a string as information output to stderr.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.error_line(styled, verbosity) | python | def line_error(self, text, style=None, verbosity=None):
"""
Write a string as information output to stderr.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.error_line(styled, verbosity) | [
"def",
"line_error",
"(",
"self",
",",
"text",
",",
"style",
"=",
"None",
",",
"verbosity",
"=",
"None",
")",
":",
"if",
"style",
":",
"styled",
"=",
"\"<%s>%s</>\"",
"%",
"(",
"style",
",",
"text",
")",
"else",
":",
"styled",
"=",
"text",
"self",
".",
"_io",
".",
"error_line",
"(",
"styled",
",",
"verbosity",
")"
] | Write a string as information output to stderr. | [
"Write",
"a",
"string",
"as",
"information",
"output",
"to",
"stderr",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L235-L244 |
5,361 | sdispater/cleo | cleo/commands/command.py | Command.progress_indicator | def progress_indicator(self, fmt=None, interval=100, values=None):
"""
Creates a new progress indicator.
"""
return ProgressIndicator(self.io, fmt, interval, values) | python | def progress_indicator(self, fmt=None, interval=100, values=None):
"""
Creates a new progress indicator.
"""
return ProgressIndicator(self.io, fmt, interval, values) | [
"def",
"progress_indicator",
"(",
"self",
",",
"fmt",
"=",
"None",
",",
"interval",
"=",
"100",
",",
"values",
"=",
"None",
")",
":",
"return",
"ProgressIndicator",
"(",
"self",
".",
"io",
",",
"fmt",
",",
"interval",
",",
"values",
")"
] | Creates a new progress indicator. | [
"Creates",
"a",
"new",
"progress",
"indicator",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L284-L288 |
5,362 | sdispater/cleo | cleo/commands/command.py | Command.spin | def spin(self, start_message, end_message, fmt=None, interval=100, values=None):
"""
Automatically spin a progress indicator.
"""
spinner = ProgressIndicator(self.io, fmt, interval, values)
return spinner.auto(start_message, end_message) | python | def spin(self, start_message, end_message, fmt=None, interval=100, values=None):
"""
Automatically spin a progress indicator.
"""
spinner = ProgressIndicator(self.io, fmt, interval, values)
return spinner.auto(start_message, end_message) | [
"def",
"spin",
"(",
"self",
",",
"start_message",
",",
"end_message",
",",
"fmt",
"=",
"None",
",",
"interval",
"=",
"100",
",",
"values",
"=",
"None",
")",
":",
"spinner",
"=",
"ProgressIndicator",
"(",
"self",
".",
"io",
",",
"fmt",
",",
"interval",
",",
"values",
")",
"return",
"spinner",
".",
"auto",
"(",
"start_message",
",",
"end_message",
")"
] | Automatically spin a progress indicator. | [
"Automatically",
"spin",
"a",
"progress",
"indicator",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L290-L296 |
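spin returns whatever ProgressIndicator.auto yields; assuming, as is conventional, that this is a context manager:

    with self.spin("Processing...", "Done."):
        do_work()  # hypothetical long-running call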
5,363 | sdispater/cleo | cleo/commands/command.py | Command.add_style | def add_style(self, name, fg=None, bg=None, options=None):
"""
Adds a new style
"""
style = Style(name)
if fg is not None:
style.fg(fg)
if bg is not None:
style.bg(bg)
if options is not None:
if "bold" in options:
style.bold()
if "underline" in options:
style.underlined()
self._io.output.formatter.add_style(style)
self._io.error_output.formatter.add_style(style) | python | def add_style(self, name, fg=None, bg=None, options=None):
"""
Adds a new style
"""
style = Style(name)
if fg is not None:
style.fg(fg)
if bg is not None:
style.bg(bg)
if options is not None:
if "bold" in options:
style.bold()
if "underline" in options:
style.underlined()
self._io.output.formatter.add_style(style)
self._io.error_output.formatter.add_style(style) | [
"def",
"add_style",
"(",
"self",
",",
"name",
",",
"fg",
"=",
"None",
",",
"bg",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"style",
"=",
"Style",
"(",
"name",
")",
"if",
"fg",
"is",
"not",
"None",
":",
"style",
".",
"fg",
"(",
"fg",
")",
"if",
"bg",
"is",
"not",
"None",
":",
"style",
".",
"bg",
"(",
"bg",
")",
"if",
"options",
"is",
"not",
"None",
":",
"if",
"\"bold\"",
"in",
"options",
":",
"style",
".",
"bold",
"(",
")",
"if",
"\"underline\"",
"in",
"options",
":",
"style",
".",
"underlined",
"(",
")",
"self",
".",
"_io",
".",
"output",
".",
"formatter",
".",
"add_style",
"(",
"style",
")",
"self",
".",
"_io",
".",
"error_output",
".",
"formatter",
".",
"add_style",
"(",
"style",
")"
] | Adds a new style | [
"Adds",
"a",
"new",
"style"
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L298-L317 |
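Registering a style and then using it through line(), which wraps text in the matching tag:

    self.add_style("fire", fg="red", bg="yellow", options=["bold"])
    self.line("Watch out!", style="fire")  # rendered as <fire>Watch out!</>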
5,364 | sdispater/cleo | cleo/commands/command.py | Command.overwrite | def overwrite(self, text, size=None):
"""
Overwrites the current line.
It will not add a new line so use line('')
if necessary.
"""
self._io.overwrite(text, size=size) | python | def overwrite(self, text, size=None):
"""
Overwrites the current line.
It will not add a new line so use line('')
if necessary.
"""
self._io.overwrite(text, size=size) | [
"def",
"overwrite",
"(",
"self",
",",
"text",
",",
"size",
"=",
"None",
")",
":",
"self",
".",
"_io",
".",
"overwrite",
"(",
"text",
",",
"size",
"=",
"size",
")"
] | Overwrites the current line.
It will not add a new line so use line('')
if necessary. | [
"Overwrites",
"the",
"current",
"line",
"."
] | cf44ac2eba2d6435516501e47e5521ee2da9115a | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L319-L326 |
5,365 | Mergifyio/git-pull-request | git_pull_request/__init__.py | get_github_hostname_user_repo_from_url | def get_github_hostname_user_repo_from_url(url):
"""Return hostname, user and repository to fork from.
:param url: The URL to parse
:return: hostname, user, repository
"""
parsed = parse.urlparse(url)
if parsed.netloc == '':
# Probably ssh
host, sep, path = parsed.path.partition(":")
if "@" in host:
username, sep, host = host.partition("@")
else:
path = parsed.path[1:].rstrip('/')
host = parsed.netloc
user, repo = path.split("/", 1)
return host, user, repo[:-4] if repo.endswith('.git') else repo | python | def get_github_hostname_user_repo_from_url(url):
"""Return hostname, user and repository to fork from.
:param url: The URL to parse
:return: hostname, user, repository
"""
parsed = parse.urlparse(url)
if parsed.netloc == '':
# Probably ssh
host, sep, path = parsed.path.partition(":")
if "@" in host:
username, sep, host = host.partition("@")
else:
path = parsed.path[1:].rstrip('/')
host = parsed.netloc
user, repo = path.split("/", 1)
return host, user, repo[:-4] if repo.endswith('.git') else repo | [
"def",
"get_github_hostname_user_repo_from_url",
"(",
"url",
")",
":",
"parsed",
"=",
"parse",
".",
"urlparse",
"(",
"url",
")",
"if",
"parsed",
".",
"netloc",
"==",
"''",
":",
"# Probably ssh",
"host",
",",
"sep",
",",
"path",
"=",
"parsed",
".",
"path",
".",
"partition",
"(",
"\":\"",
")",
"if",
"\"@\"",
"in",
"host",
":",
"username",
",",
"sep",
",",
"host",
"=",
"host",
".",
"partition",
"(",
"\"@\"",
")",
"else",
":",
"path",
"=",
"parsed",
".",
"path",
"[",
"1",
":",
"]",
".",
"rstrip",
"(",
"'/'",
")",
"host",
"=",
"parsed",
".",
"netloc",
"user",
",",
"repo",
"=",
"path",
".",
"split",
"(",
"\"/\"",
",",
"1",
")",
"return",
"host",
",",
"user",
",",
"repo",
"[",
":",
"-",
"4",
"]",
"if",
"repo",
".",
"endswith",
"(",
"'.git'",
")",
"else",
"repo"
] | Return hostname, user and repository to fork from.
:param url: The URL to parse
:return: hostname, user, repository | [
"Return",
"hostname",
"user",
"and",
"repository",
"to",
"fork",
"from",
"."
] | 58dbe3325b3dfada02482a32223fb36ebb193248 | https://github.com/Mergifyio/git-pull-request/blob/58dbe3325b3dfada02482a32223fb36ebb193248/git_pull_request/__init__.py#L114-L130 |
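Traced against the code above, both SSH and HTTPS remotes reduce to the same triple:

    get_github_hostname_user_repo_from_url("git@github.com:Mergifyio/git-pull-request.git")
    # -> ('github.com', 'Mergifyio', 'git-pull-request')
    get_github_hostname_user_repo_from_url("https://github.com/Mergifyio/git-pull-request.git")
    # -> ('github.com', 'Mergifyio', 'git-pull-request')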
5,366 | Mergifyio/git-pull-request | git_pull_request/__init__.py | git_get_title_and_message | def git_get_title_and_message(begin, end):
"""Get title and message summary for patches between 2 commits.
:param begin: first commit to look at
:param end: last commit to look at
:return: number of commits, title, message
"""
titles = git_get_log_titles(begin, end)
title = "Pull request for " + end
if len(titles) == 1:
title = titles[0]
pr_template = find_pull_request_template()
if pr_template:
message = get_pr_template_message(pr_template)
else:
if len(titles) == 1:
message = git_get_commit_body(end)
else:
message = "\n".join(titles)
return (len(titles), title, message) | python | def git_get_title_and_message(begin, end):
"""Get title and message summary for patches between 2 commits.
:param begin: first commit to look at
:param end: last commit to look at
:return: number of commits, title, message
"""
titles = git_get_log_titles(begin, end)
title = "Pull request for " + end
if len(titles) == 1:
title = titles[0]
pr_template = find_pull_request_template()
if pr_template:
message = get_pr_template_message(pr_template)
else:
if len(titles) == 1:
message = git_get_commit_body(end)
else:
message = "\n".join(titles)
return (len(titles), title, message) | [
"def",
"git_get_title_and_message",
"(",
"begin",
",",
"end",
")",
":",
"titles",
"=",
"git_get_log_titles",
"(",
"begin",
",",
"end",
")",
"title",
"=",
"\"Pull request for \"",
"+",
"end",
"if",
"len",
"(",
"titles",
")",
"==",
"1",
":",
"title",
"=",
"titles",
"[",
"0",
"]",
"pr_template",
"=",
"find_pull_request_template",
"(",
")",
"if",
"pr_template",
":",
"message",
"=",
"get_pr_template_message",
"(",
"pr_template",
")",
"else",
":",
"if",
"len",
"(",
"titles",
")",
"==",
"1",
":",
"message",
"=",
"git_get_commit_body",
"(",
"end",
")",
"else",
":",
"message",
"=",
"\"\\n\"",
".",
"join",
"(",
"titles",
")",
"return",
"(",
"len",
"(",
"titles",
")",
",",
"title",
",",
"message",
")"
] | Get title and message summary for patches between 2 commits.
:param begin: first commit to look at
:param end: last commit to look at
:return: number of commits, title, message | [
"Get",
"title",
"and",
"message",
"summary",
"for",
"patches",
"between",
"2",
"commits",
"."
] | 58dbe3325b3dfada02482a32223fb36ebb193248 | https://github.com/Mergifyio/git-pull-request/blob/58dbe3325b3dfada02482a32223fb36ebb193248/git_pull_request/__init__.py#L160-L180 |
5,367 | zhebrak/raftos | raftos/state.py | validate_commit_index | def validate_commit_index(func):
"""Apply to State Machine everything up to commit index"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
for not_applied in range(self.log.last_applied + 1, self.log.commit_index + 1):
self.state_machine.apply(self.log[not_applied]['command'])
self.log.last_applied += 1
try:
self.apply_future.set_result(not_applied)
except (asyncio.futures.InvalidStateError, AttributeError):
pass
return func(self, *args, **kwargs)
return wrapped | python | def validate_commit_index(func):
"""Apply to State Machine everything up to commit index"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
for not_applied in range(self.log.last_applied + 1, self.log.commit_index + 1):
self.state_machine.apply(self.log[not_applied]['command'])
self.log.last_applied += 1
try:
self.apply_future.set_result(not_applied)
except (asyncio.futures.InvalidStateError, AttributeError):
pass
return func(self, *args, **kwargs)
return wrapped | [
"def",
"validate_commit_index",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"not_applied",
"in",
"range",
"(",
"self",
".",
"log",
".",
"last_applied",
"+",
"1",
",",
"self",
".",
"log",
".",
"commit_index",
"+",
"1",
")",
":",
"self",
".",
"state_machine",
".",
"apply",
"(",
"self",
".",
"log",
"[",
"not_applied",
"]",
"[",
"'command'",
"]",
")",
"self",
".",
"log",
".",
"last_applied",
"+=",
"1",
"try",
":",
"self",
".",
"apply_future",
".",
"set_result",
"(",
"not_applied",
")",
"except",
"(",
"asyncio",
".",
"futures",
".",
"InvalidStateError",
",",
"AttributeError",
")",
":",
"pass",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | Apply to State Machine everything up to commit index | [
"Apply",
"to",
"State",
"Machine",
"everything",
"up",
"to",
"commit",
"index"
] | 0d6f9e049b526279b1035f597291a96cf50c9b40 | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L42-L57 |
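How the decorator is meant to be applied, as a sketch: wrap an RPC handler so committed-but-unapplied log entries reach the state machine before the message is handled (the class and handler names below are assumptions, though they match the on_receive_* naming used elsewhere in this module):

    class Follower:
        @validate_commit_index
        def on_receive_append_entries(self, data):
            # by this point self.log entries up to commit_index have been
            # applied to self.state_machine
            ...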
5,368 | zhebrak/raftos | raftos/state.py | Leader.execute_command | async def execute_command(self, command):
"""Write to log & send AppendEntries RPC"""
self.apply_future = asyncio.Future(loop=self.loop)
entry = self.log.write(self.storage.term, command)
asyncio.ensure_future(self.append_entries(), loop=self.loop)
await self.apply_future | python | async def execute_command(self, command):
"""Write to log & send AppendEntries RPC"""
self.apply_future = asyncio.Future(loop=self.loop)
entry = self.log.write(self.storage.term, command)
asyncio.ensure_future(self.append_entries(), loop=self.loop)
await self.apply_future | [
"async",
"def",
"execute_command",
"(",
"self",
",",
"command",
")",
":",
"self",
".",
"apply_future",
"=",
"asyncio",
".",
"Future",
"(",
"loop",
"=",
"self",
".",
"loop",
")",
"entry",
"=",
"self",
".",
"log",
".",
"write",
"(",
"self",
".",
"storage",
".",
"term",
",",
"command",
")",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"append_entries",
"(",
")",
",",
"loop",
"=",
"self",
".",
"loop",
")",
"await",
"self",
".",
"apply_future"
] | Write to log & send AppendEntries RPC | [
"Write",
"to",
"log",
"&",
"send",
"AppendEntries",
"RPC"
] | 0d6f9e049b526279b1035f597291a96cf50c9b40 | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L260-L267 |
5,369 | zhebrak/raftos | raftos/state.py | Candidate.start | def start(self):
"""Increment current term, vote for herself & send vote requests"""
self.storage.update({
'term': self.storage.term + 1,
'voted_for': self.id
})
self.vote_count = 1
self.request_vote()
self.election_timer.start() | python | def start(self):
"""Increment current term, vote for herself & send vote requests"""
self.storage.update({
'term': self.storage.term + 1,
'voted_for': self.id
})
self.vote_count = 1
self.request_vote()
self.election_timer.start() | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"storage",
".",
"update",
"(",
"{",
"'term'",
":",
"self",
".",
"storage",
".",
"term",
"+",
"1",
",",
"'voted_for'",
":",
"self",
".",
"id",
"}",
")",
"self",
".",
"vote_count",
"=",
"1",
"self",
".",
"request_vote",
"(",
")",
"self",
".",
"election_timer",
".",
"start",
"(",
")"
] | Increment current term, vote for herself & send vote requests | [
"Increment",
"current",
"term",
"vote",
"for",
"herself",
"&",
"send",
"vote",
"requests"
] | 0d6f9e049b526279b1035f597291a96cf50c9b40 | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L293-L302 |
5,370 | zhebrak/raftos | raftos/state.py | Candidate.on_receive_request_vote_response | def on_receive_request_vote_response(self, data):
"""Receives response for vote request.
If the vote was granted then check if we got majority and may become Leader
"""
if data.get('vote_granted'):
self.vote_count += 1
if self.state.is_majority(self.vote_count):
self.state.to_leader() | python | def on_receive_request_vote_response(self, data):
"""Receives response for vote request.
If the vote was granted then check if we got majority and may become Leader
"""
if data.get('vote_granted'):
self.vote_count += 1
if self.state.is_majority(self.vote_count):
self.state.to_leader() | [
"def",
"on_receive_request_vote_response",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
".",
"get",
"(",
"'vote_granted'",
")",
":",
"self",
".",
"vote_count",
"+=",
"1",
"if",
"self",
".",
"state",
".",
"is_majority",
"(",
"self",
".",
"vote_count",
")",
":",
"self",
".",
"state",
".",
"to_leader",
"(",
")"
] | Receives response for vote request.
If the vote was granted then check if we got majority and may become Leader | [
"Receives",
"response",
"for",
"vote",
"request",
".",
"If",
"the",
"vote",
"was",
"granted",
"then",
"check",
"if",
"we",
"got",
"majority",
"and",
"may",
"become",
"Leader"
] | 0d6f9e049b526279b1035f597291a96cf50c9b40 | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L326-L335 |
5,371 | zhebrak/raftos | raftos/state.py | Follower.init_storage | def init_storage(self):
"""Set current term to zero upon initialization & voted_for to None"""
if not self.storage.exists('term'):
self.storage.update({
'term': 0,
})
self.storage.update({
'voted_for': None
}) | python | def init_storage(self):
"""Set current term to zero upon initialization & voted_for to None"""
if not self.storage.exists('term'):
self.storage.update({
'term': 0,
})
self.storage.update({
'voted_for': None
}) | [
"def",
"init_storage",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"storage",
".",
"exists",
"(",
"'term'",
")",
":",
"self",
".",
"storage",
".",
"update",
"(",
"{",
"'term'",
":",
"0",
",",
"}",
")",
"self",
".",
"storage",
".",
"update",
"(",
"{",
"'voted_for'",
":",
"None",
"}",
")"
] | Set current term to zero upon initialization & voted_for to None | [
"Set",
"current",
"term",
"to",
"zero",
"upon",
"initialization",
"&",
"voted_for",
"to",
"None"
] | 0d6f9e049b526279b1035f597291a96cf50c9b40 | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L368-L377 |
5,372 | zhebrak/raftos | raftos/state.py | State.wait_for_election_success | async def wait_for_election_success(cls):
"""Await this function if your cluster must have a leader"""
if cls.leader is None:
cls.leader_future = asyncio.Future(loop=cls.loop)
await cls.leader_future | python | async def wait_for_election_success(cls):
"""Await this function if your cluster must have a leader"""
if cls.leader is None:
cls.leader_future = asyncio.Future(loop=cls.loop)
await cls.leader_future | [
"async",
"def",
"wait_for_election_success",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"leader",
"is",
"None",
":",
"cls",
".",
"leader_future",
"=",
"asyncio",
".",
"Future",
"(",
"loop",
"=",
"cls",
".",
"loop",
")",
"await",
"cls",
".",
"leader_future"
] | Await this function if your cluster must have a leader | [
"Await",
"this",
"function",
"if",
"your",
"cluster",
"must",
"have",
"a",
"leader"
] | 0d6f9e049b526279b1035f597291a96cf50c9b40 | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L597-L601 |
5,373 | zhebrak/raftos | raftos/state.py | State.wait_until_leader | async def wait_until_leader(cls, node_id):
"""Await this function if you want to do nothing until node_id becomes a leader"""
if node_id is None:
raise ValueError('Node id can not be None!')
if cls.get_leader() != node_id:
cls.wait_until_leader_id = node_id
cls.wait_until_leader_future = asyncio.Future(loop=cls.loop)
await cls.wait_until_leader_future
cls.wait_until_leader_id = None
cls.wait_until_leader_future = None | python | async def wait_until_leader(cls, node_id):
"""Await this function if you want to do nothing until node_id becomes a leader"""
if node_id is None:
raise ValueError('Node id can not be None!')
if cls.get_leader() != node_id:
cls.wait_until_leader_id = node_id
cls.wait_until_leader_future = asyncio.Future(loop=cls.loop)
await cls.wait_until_leader_future
cls.wait_until_leader_id = None
cls.wait_until_leader_future = None | [
"async",
"def",
"wait_until_leader",
"(",
"cls",
",",
"node_id",
")",
":",
"if",
"node_id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Node id can not be None!'",
")",
"if",
"cls",
".",
"get_leader",
"(",
")",
"!=",
"node_id",
":",
"cls",
".",
"wait_until_leader_id",
"=",
"node_id",
"cls",
".",
"wait_until_leader_future",
"=",
"asyncio",
".",
"Future",
"(",
"loop",
"=",
"cls",
".",
"loop",
")",
"await",
"cls",
".",
"wait_until_leader_future",
"cls",
".",
"wait_until_leader_id",
"=",
"None",
"cls",
".",
"wait_until_leader_future",
"=",
"None"
] | Await this function if you want to do nothing until node_id becomes a leader | [
"Await",
"this",
"function",
"if",
"you",
"want",
"to",
"do",
"nothing",
"until",
"node_id",
"becomes",
"a",
"leader"
] | 0d6f9e049b526279b1035f597291a96cf50c9b40 | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L604-L615 |
5,374 | datajoint/datajoint-python | datajoint/declare.py | declare | def declare(full_table_name, definition, context):
"""
Parse declaration and create new SQL table accordingly.
:param full_table_name: full name of the table
:param definition: DataJoint table definition
:param context: dictionary of objects that might be referred to in the table.
"""
table_name = full_table_name.strip('`').split('.')[1]
if len(table_name) > MAX_TABLE_NAME_LENGTH:
raise DataJointError(
'Table name `{name}` exceeds the max length of {max_length}'.format(
name=table_name,
max_length=MAX_TABLE_NAME_LENGTH))
# split definition into lines
definition = re.split(r'\s*\n\s*', definition.strip())
# check for optional table comment
table_comment = definition.pop(0)[1:].strip() if definition[0].startswith('#') else ''
in_key = True # parse primary keys
primary_key = []
attributes = []
attribute_sql = []
foreign_key_sql = []
index_sql = []
uses_external = False
for line in definition:
if line.startswith('#'): # additional comments are ignored
pass
elif line.startswith('---') or line.startswith('___'):
in_key = False # start parsing dependent attributes
elif is_foreign_key(line):
compile_foreign_key(line, context, attributes,
primary_key if in_key else None,
attribute_sql, foreign_key_sql, index_sql)
elif re.match(r'^(unique\s+)?index[^:]*$', line, re.I): # index
compile_index(line, index_sql)
else:
name, sql, is_external = compile_attribute(line, in_key, foreign_key_sql)
uses_external = uses_external or is_external
if in_key and name not in primary_key:
primary_key.append(name)
if name not in attributes:
attributes.append(name)
attribute_sql.append(sql)
# compile SQL
if not primary_key:
raise DataJointError('Table must have a primary key')
return (
'CREATE TABLE IF NOT EXISTS %s (\n' % full_table_name +
',\n'.join(attribute_sql + ['PRIMARY KEY (`' + '`,`'.join(primary_key) + '`)'] + foreign_key_sql + index_sql) +
'\n) ENGINE=InnoDB, COMMENT "%s"' % table_comment), uses_external | python | def declare(full_table_name, definition, context):
"""
Parse declaration and create new SQL table accordingly.
:param full_table_name: full name of the table
:param definition: DataJoint table definition
:param context: dictionary of objects that might be referred to in the table.
"""
table_name = full_table_name.strip('`').split('.')[1]
if len(table_name) > MAX_TABLE_NAME_LENGTH:
raise DataJointError(
'Table name `{name}` exceeds the max length of {max_length}'.format(
name=table_name,
max_length=MAX_TABLE_NAME_LENGTH))
# split definition into lines
definition = re.split(r'\s*\n\s*', definition.strip())
# check for optional table comment
table_comment = definition.pop(0)[1:].strip() if definition[0].startswith('#') else ''
in_key = True # parse primary keys
primary_key = []
attributes = []
attribute_sql = []
foreign_key_sql = []
index_sql = []
uses_external = False
for line in definition:
if line.startswith('#'): # additional comments are ignored
pass
elif line.startswith('---') or line.startswith('___'):
in_key = False # start parsing dependent attributes
elif is_foreign_key(line):
compile_foreign_key(line, context, attributes,
primary_key if in_key else None,
attribute_sql, foreign_key_sql, index_sql)
elif re.match(r'^(unique\s+)?index[^:]*$', line, re.I): # index
compile_index(line, index_sql)
else:
name, sql, is_external = compile_attribute(line, in_key, foreign_key_sql)
uses_external = uses_external or is_external
if in_key and name not in primary_key:
primary_key.append(name)
if name not in attributes:
attributes.append(name)
attribute_sql.append(sql)
# compile SQL
if not primary_key:
raise DataJointError('Table must have a primary key')
return (
'CREATE TABLE IF NOT EXISTS %s (\n' % full_table_name +
',\n'.join(attribute_sql + ['PRIMARY KEY (`' + '`,`'.join(primary_key) + '`)'] + foreign_key_sql + index_sql) +
'\n) ENGINE=InnoDB, COMMENT "%s"' % table_comment), uses_external | [
"def",
"declare",
"(",
"full_table_name",
",",
"definition",
",",
"context",
")",
":",
"table_name",
"=",
"full_table_name",
".",
"strip",
"(",
"'`'",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"if",
"len",
"(",
"table_name",
")",
">",
"MAX_TABLE_NAME_LENGTH",
":",
"raise",
"DataJointError",
"(",
"'Table name `{name}` exceeds the max length of {max_length}'",
".",
"format",
"(",
"name",
"=",
"table_name",
",",
"max_length",
"=",
"MAX_TABLE_NAME_LENGTH",
")",
")",
"# split definition into lines",
"definition",
"=",
"re",
".",
"split",
"(",
"r'\\s*\\n\\s*'",
",",
"definition",
".",
"strip",
"(",
")",
")",
"# check for optional table comment",
"table_comment",
"=",
"definition",
".",
"pop",
"(",
"0",
")",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"if",
"definition",
"[",
"0",
"]",
".",
"startswith",
"(",
"'#'",
")",
"else",
"''",
"in_key",
"=",
"True",
"# parse primary keys",
"primary_key",
"=",
"[",
"]",
"attributes",
"=",
"[",
"]",
"attribute_sql",
"=",
"[",
"]",
"foreign_key_sql",
"=",
"[",
"]",
"index_sql",
"=",
"[",
"]",
"uses_external",
"=",
"False",
"for",
"line",
"in",
"definition",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"# additional comments are ignored",
"pass",
"elif",
"line",
".",
"startswith",
"(",
"'---'",
")",
"or",
"line",
".",
"startswith",
"(",
"'___'",
")",
":",
"in_key",
"=",
"False",
"# start parsing dependent attributes",
"elif",
"is_foreign_key",
"(",
"line",
")",
":",
"compile_foreign_key",
"(",
"line",
",",
"context",
",",
"attributes",
",",
"primary_key",
"if",
"in_key",
"else",
"None",
",",
"attribute_sql",
",",
"foreign_key_sql",
",",
"index_sql",
")",
"elif",
"re",
".",
"match",
"(",
"r'^(unique\\s+)?index[^:]*$'",
",",
"line",
",",
"re",
".",
"I",
")",
":",
"# index",
"compile_index",
"(",
"line",
",",
"index_sql",
")",
"else",
":",
"name",
",",
"sql",
",",
"is_external",
"=",
"compile_attribute",
"(",
"line",
",",
"in_key",
",",
"foreign_key_sql",
")",
"uses_external",
"=",
"uses_external",
"or",
"is_external",
"if",
"in_key",
"and",
"name",
"not",
"in",
"primary_key",
":",
"primary_key",
".",
"append",
"(",
"name",
")",
"if",
"name",
"not",
"in",
"attributes",
":",
"attributes",
".",
"append",
"(",
"name",
")",
"attribute_sql",
".",
"append",
"(",
"sql",
")",
"# compile SQL",
"if",
"not",
"primary_key",
":",
"raise",
"DataJointError",
"(",
"'Table must have a primary key'",
")",
"return",
"(",
"'CREATE TABLE IF NOT EXISTS %s (\\n'",
"%",
"full_table_name",
"+",
"',\\n'",
".",
"join",
"(",
"attribute_sql",
"+",
"[",
"'PRIMARY KEY (`'",
"+",
"'`,`'",
".",
"join",
"(",
"primary_key",
")",
"+",
"'`)'",
"]",
"+",
"foreign_key_sql",
"+",
"index_sql",
")",
"+",
"'\\n) ENGINE=InnoDB, COMMENT \"%s\"'",
"%",
"table_comment",
")",
",",
"uses_external"
] | Parse declaration and create new SQL table accordingly.
:param full_table_name: full name of the table
:param definition: DataJoint table definition
:param context: dictionary of objects that might be referred to in the table. | [
"Parse",
"declaration",
"and",
"create",
"new",
"SQL",
"table",
"accordingly",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/declare.py#L192-L245 |
5,375 | datajoint/datajoint-python | datajoint/schema.py | create_virtual_module | def create_virtual_module(module_name, schema_name, create_schema=False, create_tables=False, connection=None):
"""
Creates a python module with the given name from the name of a schema on the server and
automatically adds classes to it corresponding to the tables in the schema.
:param module_name: displayed module name
:param schema_name: name of the database in mysql
:param create_schema: if True, create the schema on the database server
:param create_tables: if True, module.schema can be used as the decorator for declaring new tables
:return: the python module containing classes from the schema object and the table classes
"""
module = types.ModuleType(module_name)
_schema = Schema(schema_name, create_schema=create_schema, create_tables=create_tables, connection=connection)
_schema.spawn_missing_classes(context=module.__dict__)
module.__dict__['schema'] = _schema
return module | python | def create_virtual_module(module_name, schema_name, create_schema=False, create_tables=False, connection=None):
"""
Creates a python module with the given name from the name of a schema on the server and
automatically adds classes to it corresponding to the tables in the schema.
:param module_name: displayed module name
:param schema_name: name of the database in mysql
:param create_schema: if True, create the schema on the database server
:param create_tables: if True, module.schema can be used as the decorator for declaring new tables
:return: the python module containing classes from the schema object and the table classes
"""
module = types.ModuleType(module_name)
_schema = Schema(schema_name, create_schema=create_schema, create_tables=create_tables, connection=connection)
_schema.spawn_missing_classes(context=module.__dict__)
module.__dict__['schema'] = _schema
return module | [
"def",
"create_virtual_module",
"(",
"module_name",
",",
"schema_name",
",",
"create_schema",
"=",
"False",
",",
"create_tables",
"=",
"False",
",",
"connection",
"=",
"None",
")",
":",
"module",
"=",
"types",
".",
"ModuleType",
"(",
"module_name",
")",
"_schema",
"=",
"Schema",
"(",
"schema_name",
",",
"create_schema",
"=",
"create_schema",
",",
"create_tables",
"=",
"create_tables",
",",
"connection",
"=",
"connection",
")",
"_schema",
".",
"spawn_missing_classes",
"(",
"context",
"=",
"module",
".",
"__dict__",
")",
"module",
".",
"__dict__",
"[",
"'schema'",
"]",
"=",
"_schema",
"return",
"module"
] | Creates a python module with the given name from the name of a schema on the server and
automatically adds classes to it corresponding to the tables in the schema.
:param module_name: displayed module name
:param schema_name: name of the database in mysql
:param create_schema: if True, create the schema on the database server
:param create_tables: if True, module.schema can be used as the decorator for declaring new
:return: the python module containing classes from the schema object and the table classes | [
"Creates",
"a",
"python",
"module",
"with",
"the",
"given",
"name",
"from",
"the",
"name",
"of",
"a",
"schema",
"on",
"the",
"server",
"and",
"automatically",
"adds",
"classes",
"to",
"it",
"corresponding",
"to",
"the",
"tables",
"in",
"the",
"schema",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/schema.py#L241-L256 |
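A minimal usage sketch for create_virtual_module; the schema name 'university' is a placeholder, a reachable MySQL server configured via dj.config is assumed, and the function is assumed to be re-exported at package level:

    import datajoint as dj

    university = dj.create_virtual_module('university', 'university')
    print([name for name in dir(university) if not name.startswith('_')])  # spawned table classes
    schema_obj = university.schema  # the Schema object is attached as module.schema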
5,376 | datajoint/datajoint-python | datajoint/schema.py | Schema.drop | def drop(self, force=False):
"""
Drop the associated schema if it exists
"""
if not self.exists:
logger.info("Schema named `{database}` does not exist. Doing nothing.".format(database=self.database))
elif (not config['safemode'] or
force or
user_choice("Proceed to delete entire schema `%s`?" % self.database, default='no') == 'yes'):
logger.info("Dropping `{database}`.".format(database=self.database))
try:
self.connection.query("DROP DATABASE `{database}`".format(database=self.database))
logger.info("Schema `{database}` was dropped successfully.".format(database=self.database))
except pymysql.OperationalError:
raise DataJointError("An attempt to drop schema `{database}` "
"has failed. Check permissions.".format(database=self.database)) | python | def drop(self, force=False):
"""
Drop the associated schema if it exists
"""
if not self.exists:
logger.info("Schema named `{database}` does not exist. Doing nothing.".format(database=self.database))
elif (not config['safemode'] or
force or
user_choice("Proceed to delete entire schema `%s`?" % self.database, default='no') == 'yes'):
logger.info("Dropping `{database}`.".format(database=self.database))
try:
self.connection.query("DROP DATABASE `{database}`".format(database=self.database))
logger.info("Schema `{database}` was dropped successfully.".format(database=self.database))
except pymysql.OperationalError:
raise DataJointError("An attempt to drop schema `{database}` "
"has failed. Check permissions.".format(database=self.database)) | [
"def",
"drop",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"exists",
":",
"logger",
".",
"info",
"(",
"\"Schema named `{database}` does not exist. Doing nothing.\"",
".",
"format",
"(",
"database",
"=",
"self",
".",
"database",
")",
")",
"elif",
"(",
"not",
"config",
"[",
"'safemode'",
"]",
"or",
"force",
"or",
"user_choice",
"(",
"\"Proceed to delete entire schema `%s`?\"",
"%",
"self",
".",
"database",
",",
"default",
"=",
"'no'",
")",
"==",
"'yes'",
")",
":",
"logger",
".",
"info",
"(",
"\"Dropping `{database}`.\"",
".",
"format",
"(",
"database",
"=",
"self",
".",
"database",
")",
")",
"try",
":",
"self",
".",
"connection",
".",
"query",
"(",
"\"DROP DATABASE `{database}`\"",
".",
"format",
"(",
"database",
"=",
"self",
".",
"database",
")",
")",
"logger",
".",
"info",
"(",
"\"Schema `{database}` was dropped successfully.\"",
".",
"format",
"(",
"database",
"=",
"self",
".",
"database",
")",
")",
"except",
"pymysql",
".",
"OperationalError",
":",
"raise",
"DataJointError",
"(",
"\"An attempt to drop schema `{database}` \"",
"\"has failed. Check permissions.\"",
".",
"format",
"(",
"database",
"=",
"self",
".",
"database",
")",
")"
] | Drop the associated schema if it exists | [
"Drop",
"the",
"associated",
"schema",
"if",
"it",
"exists"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/schema.py#L146-L161 |
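A hedged sketch of dropping a schema (destructive; 'tutorial' is a hypothetical database name):

    import datajoint as dj

    schema = dj.schema('tutorial')
    # With dj.config['safemode'] left on, drop() confirms via user_choice first;
    # force=True (or safemode off) skips the prompt.
    schema.drop(force=True)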
5,377 | datajoint/datajoint-python | datajoint/schema.py | Schema.process_relation_class | def process_relation_class(self, relation_class, context, assert_declared=False):
"""
assign schema properties to the relation class and declare the table
"""
relation_class.database = self.database
relation_class._connection = self.connection
relation_class._heading = Heading()
# instantiate the class, declare the table if not already
instance = relation_class()
is_declared = instance.is_declared
if not is_declared:
if not self.create_tables or assert_declared:
raise DataJointError('Table not declared %s' % instance.table_name)
else:
instance.declare(context)
is_declared = is_declared or instance.is_declared
# fill values in Lookup tables from their contents property
if isinstance(instance, Lookup) and hasattr(instance, 'contents') and is_declared:
contents = list(instance.contents)
if len(contents) > len(instance):
if instance.heading.has_autoincrement:
warnings.warn(
'Contents has changed but cannot be inserted because {table} has autoincrement.'.format(
table=instance.__class__.__name__))
else:
instance.insert(contents, skip_duplicates=True) | python | def process_relation_class(self, relation_class, context, assert_declared=False):
"""
assign schema properties to the relation class and declare the table
"""
relation_class.database = self.database
relation_class._connection = self.connection
relation_class._heading = Heading()
# instantiate the class, declare the table if not already
instance = relation_class()
is_declared = instance.is_declared
if not is_declared:
if not self.create_tables or assert_declared:
raise DataJointError('Table not declared %s' % instance.table_name)
else:
instance.declare(context)
is_declared = is_declared or instance.is_declared
# fill values in Lookup tables from their contents property
if isinstance(instance, Lookup) and hasattr(instance, 'contents') and is_declared:
contents = list(instance.contents)
if len(contents) > len(instance):
if instance.heading.has_autoincrement:
warnings.warn(
'Contents has changed but cannot be inserted because {table} has autoincrement.'.format(
table=instance.__class__.__name__))
else:
instance.insert(contents, skip_duplicates=True) | [
"def",
"process_relation_class",
"(",
"self",
",",
"relation_class",
",",
"context",
",",
"assert_declared",
"=",
"False",
")",
":",
"relation_class",
".",
"database",
"=",
"self",
".",
"database",
"relation_class",
".",
"_connection",
"=",
"self",
".",
"connection",
"relation_class",
".",
"_heading",
"=",
"Heading",
"(",
")",
"# instantiate the class, declare the table if not already",
"instance",
"=",
"relation_class",
"(",
")",
"is_declared",
"=",
"instance",
".",
"is_declared",
"if",
"not",
"is_declared",
":",
"if",
"not",
"self",
".",
"create_tables",
"or",
"assert_declared",
":",
"raise",
"DataJointError",
"(",
"'Table not declared %s'",
"%",
"instance",
".",
"table_name",
")",
"else",
":",
"instance",
".",
"declare",
"(",
"context",
")",
"is_declared",
"=",
"is_declared",
"or",
"instance",
".",
"is_declared",
"# fill values in Lookup tables from their contents property",
"if",
"isinstance",
"(",
"instance",
",",
"Lookup",
")",
"and",
"hasattr",
"(",
"instance",
",",
"'contents'",
")",
"and",
"is_declared",
":",
"contents",
"=",
"list",
"(",
"instance",
".",
"contents",
")",
"if",
"len",
"(",
"contents",
")",
">",
"len",
"(",
"instance",
")",
":",
"if",
"instance",
".",
"heading",
".",
"has_autoincrement",
":",
"warnings",
".",
"warn",
"(",
"'Contents has changed but cannot be inserted because {table} has autoincrement.'",
".",
"format",
"(",
"table",
"=",
"instance",
".",
"__class__",
".",
"__name__",
")",
")",
"else",
":",
"instance",
".",
"insert",
"(",
"contents",
",",
"skip_duplicates",
"=",
"True",
")"
] | assign schema properties to the relation class and declare the table | [
"assign",
"schema",
"properties",
"to",
"the",
"relation",
"class",
"and",
"declare",
"the",
"table"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/schema.py#L171-L197 |
5,378 | datajoint/datajoint-python | datajoint/table.py | Table.declare | def declare(self, context=None):
"""
Use self.definition to declare the table in the schema.
"""
try:
sql, uses_external = declare(self.full_table_name, self.definition, context)
if uses_external:
sql = sql.format(external_table=self.external_table.full_table_name)
self.connection.query(sql)
except pymysql.OperationalError as error:
# skip if no create privilege
if error.args[0] == server_error_codes['command denied']:
logger.warning(error.args[1])
else:
raise
else:
self._log('Declared ' + self.full_table_name) | python | def declare(self, context=None):
"""
Use self.definition to declare the table in the schema.
"""
try:
sql, uses_external = declare(self.full_table_name, self.definition, context)
if uses_external:
sql = sql.format(external_table=self.external_table.full_table_name)
self.connection.query(sql)
except pymysql.OperationalError as error:
# skip if no create privilege
if error.args[0] == server_error_codes['command denied']:
logger.warning(error.args[1])
else:
raise
else:
self._log('Declared ' + self.full_table_name) | [
"def",
"declare",
"(",
"self",
",",
"context",
"=",
"None",
")",
":",
"try",
":",
"sql",
",",
"uses_external",
"=",
"declare",
"(",
"self",
".",
"full_table_name",
",",
"self",
".",
"definition",
",",
"context",
")",
"if",
"uses_external",
":",
"sql",
"=",
"sql",
".",
"format",
"(",
"external_table",
"=",
"self",
".",
"external_table",
".",
"full_table_name",
")",
"self",
".",
"connection",
".",
"query",
"(",
"sql",
")",
"except",
"pymysql",
".",
"OperationalError",
"as",
"error",
":",
"# skip if no create privilege",
"if",
"error",
".",
"args",
"[",
"0",
"]",
"==",
"server_error_codes",
"[",
"'command denied'",
"]",
":",
"logger",
".",
"warning",
"(",
"error",
".",
"args",
"[",
"1",
"]",
")",
"else",
":",
"raise",
"else",
":",
"self",
".",
"_log",
"(",
"'Declared '",
"+",
"self",
".",
"full_table_name",
")"
] | Use self.definition to declare the table in the schema. | [
"Use",
"self",
".",
"definition",
"to",
"declare",
"the",
"table",
"in",
"the",
"schema",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/table.py#L58-L74 |
5,379 | datajoint/datajoint-python | datajoint/table.py | Table.delete_quick | def delete_quick(self, get_count=False):
"""
        Deletes the table contents without cascading and without user prompt.
If this table has populated dependent tables, this will fail.
"""
query = 'DELETE FROM ' + self.full_table_name + self.where_clause
self.connection.query(query)
count = self.connection.query("SELECT ROW_COUNT()").fetchone()[0] if get_count else None
self._log(query[:255])
return count | python | def delete_quick(self, get_count=False):
"""
        Deletes the table contents without cascading and without user prompt.
If this table has populated dependent tables, this will fail.
"""
query = 'DELETE FROM ' + self.full_table_name + self.where_clause
self.connection.query(query)
count = self.connection.query("SELECT ROW_COUNT()").fetchone()[0] if get_count else None
self._log(query[:255])
return count | [
"def",
"delete_quick",
"(",
"self",
",",
"get_count",
"=",
"False",
")",
":",
"query",
"=",
"'DELETE FROM '",
"+",
"self",
".",
"full_table_name",
"+",
"self",
".",
"where_clause",
"self",
".",
"connection",
".",
"query",
"(",
"query",
")",
"count",
"=",
"self",
".",
"connection",
".",
"query",
"(",
"\"SELECT ROW_COUNT()\"",
")",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"if",
"get_count",
"else",
"None",
"self",
".",
"_log",
"(",
"query",
"[",
":",
"255",
"]",
")",
"return",
"count"
] | Deletes the table contents without cascading and without user prompt.
If this table has populated dependent tables, this will fail. | [
"Deletes",
"the",
"table",
"without",
"cascading",
"and",
"without",
"user",
"prompt",
".",
"If",
"this",
"table",
"has",
"populated",
"dependent",
"tables",
"this",
"will",
"fail",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/table.py#L313-L322 |
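A usage sketch for the quick, non-cascading delete; Session is a hypothetical, already-declared table class:

    deleted = (Session() & {'session_id': 1}).delete_quick(get_count=True)
    print('rows deleted:', deleted)  # server-side ROW_COUNT(); None when get_count=False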
5,380 | datajoint/datajoint-python | datajoint/table.py | Table._update | def _update(self, attrname, value=None):
"""
Updates a field in an existing tuple. This is not a datajoyous operation and should not be used
        routinely. Relational databases maintain referential integrity on the level of a tuple. Therefore,
        the UPDATE operator can violate referential integrity. The datajoyous way to update information is
        to delete the entire tuple and insert the entire updated tuple.
Safety constraints:
1. self must be restricted to exactly one tuple
2. the update attribute must not be in primary key
Example
>>> (v2p.Mice() & key).update('mouse_dob', '2011-01-01')
>>> (v2p.Mice() & key).update( 'lens') # set the value to NULL
"""
if len(self) != 1:
raise DataJointError('Update is only allowed on one tuple at a time')
if attrname not in self.heading:
raise DataJointError('Invalid attribute name')
if attrname in self.heading.primary_key:
raise DataJointError('Cannot update a key value.')
attr = self.heading[attrname]
if attr.is_blob:
value = pack(value)
placeholder = '%s'
elif attr.numeric:
if value is None or np.isnan(np.float(value)): # nans are turned into NULLs
placeholder = 'NULL'
value = None
else:
placeholder = '%s'
value = str(int(value) if isinstance(value, bool) else value)
else:
placeholder = '%s'
command = "UPDATE {full_table_name} SET `{attrname}`={placeholder} {where_clause}".format(
full_table_name=self.from_clause,
attrname=attrname,
placeholder=placeholder,
where_clause=self.where_clause)
self.connection.query(command, args=(value, ) if value is not None else ()) | python | def _update(self, attrname, value=None):
"""
Updates a field in an existing tuple. This is not a datajoyous operation and should not be used
        routinely. Relational databases maintain referential integrity on the level of a tuple. Therefore,
        the UPDATE operator can violate referential integrity. The datajoyous way to update information is
        to delete the entire tuple and insert the entire updated tuple.
Safety constraints:
1. self must be restricted to exactly one tuple
2. the update attribute must not be in primary key
Example
>>> (v2p.Mice() & key).update('mouse_dob', '2011-01-01')
>>> (v2p.Mice() & key).update( 'lens') # set the value to NULL
"""
if len(self) != 1:
raise DataJointError('Update is only allowed on one tuple at a time')
if attrname not in self.heading:
raise DataJointError('Invalid attribute name')
if attrname in self.heading.primary_key:
raise DataJointError('Cannot update a key value.')
attr = self.heading[attrname]
if attr.is_blob:
value = pack(value)
placeholder = '%s'
elif attr.numeric:
if value is None or np.isnan(np.float(value)): # nans are turned into NULLs
placeholder = 'NULL'
value = None
else:
placeholder = '%s'
value = str(int(value) if isinstance(value, bool) else value)
else:
placeholder = '%s'
command = "UPDATE {full_table_name} SET `{attrname}`={placeholder} {where_clause}".format(
full_table_name=self.from_clause,
attrname=attrname,
placeholder=placeholder,
where_clause=self.where_clause)
self.connection.query(command, args=(value, ) if value is not None else ()) | [
"def",
"_update",
"(",
"self",
",",
"attrname",
",",
"value",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
")",
"!=",
"1",
":",
"raise",
"DataJointError",
"(",
"'Update is only allowed on one tuple at a time'",
")",
"if",
"attrname",
"not",
"in",
"self",
".",
"heading",
":",
"raise",
"DataJointError",
"(",
"'Invalid attribute name'",
")",
"if",
"attrname",
"in",
"self",
".",
"heading",
".",
"primary_key",
":",
"raise",
"DataJointError",
"(",
"'Cannot update a key value.'",
")",
"attr",
"=",
"self",
".",
"heading",
"[",
"attrname",
"]",
"if",
"attr",
".",
"is_blob",
":",
"value",
"=",
"pack",
"(",
"value",
")",
"placeholder",
"=",
"'%s'",
"elif",
"attr",
".",
"numeric",
":",
"if",
"value",
"is",
"None",
"or",
"np",
".",
"isnan",
"(",
"np",
".",
"float",
"(",
"value",
")",
")",
":",
"# nans are turned into NULLs",
"placeholder",
"=",
"'NULL'",
"value",
"=",
"None",
"else",
":",
"placeholder",
"=",
"'%s'",
"value",
"=",
"str",
"(",
"int",
"(",
"value",
")",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
"else",
"value",
")",
"else",
":",
"placeholder",
"=",
"'%s'",
"command",
"=",
"\"UPDATE {full_table_name} SET `{attrname}`={placeholder} {where_clause}\"",
".",
"format",
"(",
"full_table_name",
"=",
"self",
".",
"from_clause",
",",
"attrname",
"=",
"attrname",
",",
"placeholder",
"=",
"placeholder",
",",
"where_clause",
"=",
"self",
".",
"where_clause",
")",
"self",
".",
"connection",
".",
"query",
"(",
"command",
",",
"args",
"=",
"(",
"value",
",",
")",
"if",
"value",
"is",
"not",
"None",
"else",
"(",
")",
")"
] | Updates a field in an existing tuple. This is not a datajoyous operation and should not be used
routinely. Relational databases maintain referential integrity on the level of a tuple. Therefore,
the UPDATE operator can violate referential integrity. The datajoyous way to update information is
to delete the entire tuple and insert the entire updated tuple.
Safety constraints:
1. self must be restricted to exactly one tuple
2. the update attribute must not be in primary key
Example
>>> (v2p.Mice() & key).update('mouse_dob', '2011-01-01')
>>> (v2p.Mice() & key).update( 'lens') # set the value to NULL | [
"Updates",
"a",
"field",
"in",
"an",
"existing",
"tuple",
".",
"This",
"is",
"not",
"a",
"datajoyous",
"operation",
"and",
"should",
"not",
"be",
"used",
"routinely",
".",
"Relational",
"database",
"maintain",
"referential",
"integrity",
"on",
"the",
"level",
"of",
"a",
"tuple",
".",
"Therefore",
"the",
"UPDATE",
"operator",
"can",
"violate",
"referential",
"integrity",
".",
"The",
"datajoyous",
"way",
"to",
"update",
"information",
"is",
"to",
"delete",
"the",
"entire",
"tuple",
"and",
"insert",
"the",
"entire",
"update",
"tuple",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/table.py#L525-L568 |
5,381 | datajoint/datajoint-python | datajoint/expression.py | QueryExpression.where_clause | def where_clause(self):
"""
convert self.restriction to the SQL WHERE clause
"""
cond = self._make_condition(self.restriction)
return '' if cond is True else ' WHERE %s' % cond | python | def where_clause(self):
"""
convert self.restriction to the SQL WHERE clause
"""
cond = self._make_condition(self.restriction)
return '' if cond is True else ' WHERE %s' % cond | [
"def",
"where_clause",
"(",
"self",
")",
":",
"cond",
"=",
"self",
".",
"_make_condition",
"(",
"self",
".",
"restriction",
")",
"return",
"''",
"if",
"cond",
"is",
"True",
"else",
"' WHERE %s'",
"%",
"cond"
] | convert self.restriction to the SQL WHERE clause | [
"convert",
"self",
".",
"restriction",
"to",
"the",
"SQL",
"WHERE",
"clause"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L196-L201 |
5,382 | datajoint/datajoint-python | datajoint/expression.py | QueryExpression.preview | def preview(self, limit=None, width=None):
"""
returns a preview of the contents of the query.
"""
heading = self.heading
rel = self.proj(*heading.non_blobs)
if limit is None:
limit = config['display.limit']
if width is None:
width = config['display.width']
tuples = rel.fetch(limit=limit+1, format="array")
has_more = len(tuples) > limit
tuples = tuples[:limit]
columns = heading.names
widths = {f: min(max([len(f)] +
[len(str(e)) for e in tuples[f]] if f in tuples.dtype.names else [len('=BLOB=')]) + 4, width) for f in columns}
templates = {f: '%%-%d.%ds' % (widths[f], widths[f]) for f in columns}
return (
' '.join([templates[f] % ('*' + f if f in rel.primary_key else f) for f in columns]) + '\n' +
' '.join(['+' + '-' * (widths[column] - 2) + '+' for column in columns]) + '\n' +
'\n'.join(' '.join(templates[f] % (tup[f] if f in tup.dtype.names else '=BLOB=')
for f in columns) for tup in tuples) +
('\n ...\n' if has_more else '\n') +
(' (Total: %d)\n' % len(rel) if config['display.show_tuple_count'] else '')) | python | def preview(self, limit=None, width=None):
"""
returns a preview of the contents of the query.
"""
heading = self.heading
rel = self.proj(*heading.non_blobs)
if limit is None:
limit = config['display.limit']
if width is None:
width = config['display.width']
tuples = rel.fetch(limit=limit+1, format="array")
has_more = len(tuples) > limit
tuples = tuples[:limit]
columns = heading.names
widths = {f: min(max([len(f)] +
[len(str(e)) for e in tuples[f]] if f in tuples.dtype.names else [len('=BLOB=')]) + 4, width) for f in columns}
templates = {f: '%%-%d.%ds' % (widths[f], widths[f]) for f in columns}
return (
' '.join([templates[f] % ('*' + f if f in rel.primary_key else f) for f in columns]) + '\n' +
' '.join(['+' + '-' * (widths[column] - 2) + '+' for column in columns]) + '\n' +
'\n'.join(' '.join(templates[f] % (tup[f] if f in tup.dtype.names else '=BLOB=')
for f in columns) for tup in tuples) +
('\n ...\n' if has_more else '\n') +
(' (Total: %d)\n' % len(rel) if config['display.show_tuple_count'] else '')) | [
"def",
"preview",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"width",
"=",
"None",
")",
":",
"heading",
"=",
"self",
".",
"heading",
"rel",
"=",
"self",
".",
"proj",
"(",
"*",
"heading",
".",
"non_blobs",
")",
"if",
"limit",
"is",
"None",
":",
"limit",
"=",
"config",
"[",
"'display.limit'",
"]",
"if",
"width",
"is",
"None",
":",
"width",
"=",
"config",
"[",
"'display.width'",
"]",
"tuples",
"=",
"rel",
".",
"fetch",
"(",
"limit",
"=",
"limit",
"+",
"1",
",",
"format",
"=",
"\"array\"",
")",
"has_more",
"=",
"len",
"(",
"tuples",
")",
">",
"limit",
"tuples",
"=",
"tuples",
"[",
":",
"limit",
"]",
"columns",
"=",
"heading",
".",
"names",
"widths",
"=",
"{",
"f",
":",
"min",
"(",
"max",
"(",
"[",
"len",
"(",
"f",
")",
"]",
"+",
"[",
"len",
"(",
"str",
"(",
"e",
")",
")",
"for",
"e",
"in",
"tuples",
"[",
"f",
"]",
"]",
"if",
"f",
"in",
"tuples",
".",
"dtype",
".",
"names",
"else",
"[",
"len",
"(",
"'=BLOB='",
")",
"]",
")",
"+",
"4",
",",
"width",
")",
"for",
"f",
"in",
"columns",
"}",
"templates",
"=",
"{",
"f",
":",
"'%%-%d.%ds'",
"%",
"(",
"widths",
"[",
"f",
"]",
",",
"widths",
"[",
"f",
"]",
")",
"for",
"f",
"in",
"columns",
"}",
"return",
"(",
"' '",
".",
"join",
"(",
"[",
"templates",
"[",
"f",
"]",
"%",
"(",
"'*'",
"+",
"f",
"if",
"f",
"in",
"rel",
".",
"primary_key",
"else",
"f",
")",
"for",
"f",
"in",
"columns",
"]",
")",
"+",
"'\\n'",
"+",
"' '",
".",
"join",
"(",
"[",
"'+'",
"+",
"'-'",
"*",
"(",
"widths",
"[",
"column",
"]",
"-",
"2",
")",
"+",
"'+'",
"for",
"column",
"in",
"columns",
"]",
")",
"+",
"'\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"' '",
".",
"join",
"(",
"templates",
"[",
"f",
"]",
"%",
"(",
"tup",
"[",
"f",
"]",
"if",
"f",
"in",
"tup",
".",
"dtype",
".",
"names",
"else",
"'=BLOB='",
")",
"for",
"f",
"in",
"columns",
")",
"for",
"tup",
"in",
"tuples",
")",
"+",
"(",
"'\\n ...\\n'",
"if",
"has_more",
"else",
"'\\n'",
")",
"+",
"(",
"' (Total: %d)\\n'",
"%",
"len",
"(",
"rel",
")",
"if",
"config",
"[",
"'display.show_tuple_count'",
"]",
"else",
"''",
")",
")"
] | returns a preview of the contents of the query. | [
"returns",
"a",
"preview",
"of",
"the",
"contents",
"of",
"the",
"query",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L382-L405 |
5,383 | datajoint/datajoint-python | datajoint/expression.py | Join.make_argument_subquery | def make_argument_subquery(arg):
"""
Decide when a Join argument needs to be wrapped in a subquery
"""
return Subquery.create(arg) if isinstance(arg, (GroupBy, Projection)) or arg.restriction else arg | python | def make_argument_subquery(arg):
"""
Decide when a Join argument needs to be wrapped in a subquery
"""
return Subquery.create(arg) if isinstance(arg, (GroupBy, Projection)) or arg.restriction else arg | [
"def",
"make_argument_subquery",
"(",
"arg",
")",
":",
"return",
"Subquery",
".",
"create",
"(",
"arg",
")",
"if",
"isinstance",
"(",
"arg",
",",
"(",
"GroupBy",
",",
"Projection",
")",
")",
"or",
"arg",
".",
"restriction",
"else",
"arg"
] | Decide when a Join argument needs to be wrapped in a subquery | [
"Decide",
"when",
"a",
"Join",
"argument",
"needs",
"to",
"be",
"wrapped",
"in",
"a",
"subquery"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L606-L610 |
5,384 | datajoint/datajoint-python | datajoint/expression.py | Projection._need_subquery | def _need_subquery(arg, attributes, named_attributes):
"""
Decide whether the projection argument needs to be wrapped in a subquery
"""
if arg.heading.expressions or arg.distinct: # argument has any renamed (computed) attributes
return True
restricting_attributes = arg.attributes_in_restriction()
return (not restricting_attributes.issubset(attributes) or # if any restricting attribute is projected out or
any(v.strip() in restricting_attributes for v in named_attributes.values())) | python | def _need_subquery(arg, attributes, named_attributes):
"""
Decide whether the projection argument needs to be wrapped in a subquery
"""
if arg.heading.expressions or arg.distinct: # argument has any renamed (computed) attributes
return True
restricting_attributes = arg.attributes_in_restriction()
return (not restricting_attributes.issubset(attributes) or # if any restricting attribute is projected out or
any(v.strip() in restricting_attributes for v in named_attributes.values())) | [
"def",
"_need_subquery",
"(",
"arg",
",",
"attributes",
",",
"named_attributes",
")",
":",
"if",
"arg",
".",
"heading",
".",
"expressions",
"or",
"arg",
".",
"distinct",
":",
"# argument has any renamed (computed) attributes",
"return",
"True",
"restricting_attributes",
"=",
"arg",
".",
"attributes_in_restriction",
"(",
")",
"return",
"(",
"not",
"restricting_attributes",
".",
"issubset",
"(",
"attributes",
")",
"or",
"# if any restricting attribute is projected out or",
"any",
"(",
"v",
".",
"strip",
"(",
")",
"in",
"restricting_attributes",
"for",
"v",
"in",
"named_attributes",
".",
"values",
"(",
")",
")",
")"
] | Decide whether the projection argument needs to be wrapped in a subquery | [
"Decide",
"whether",
"the",
"projection",
"argument",
"needs",
"to",
"be",
"wrapped",
"in",
"a",
"subquery"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L717-L725 |
5,385 | datajoint/datajoint-python | datajoint/expression.py | Subquery.create | def create(cls, arg):
"""
construct a subquery from arg
"""
obj = cls()
obj._connection = arg.connection
obj._heading = arg.heading.make_subquery_heading()
obj._arg = arg
return obj | python | def create(cls, arg):
"""
construct a subquery from arg
"""
obj = cls()
obj._connection = arg.connection
obj._heading = arg.heading.make_subquery_heading()
obj._arg = arg
return obj | [
"def",
"create",
"(",
"cls",
",",
"arg",
")",
":",
"obj",
"=",
"cls",
"(",
")",
"obj",
".",
"_connection",
"=",
"arg",
".",
"connection",
"obj",
".",
"_heading",
"=",
"arg",
".",
"heading",
".",
"make_subquery_heading",
"(",
")",
"obj",
".",
"_arg",
"=",
"arg",
"return",
"obj"
] | construct a subquery from arg | [
"construct",
"a",
"subquery",
"from",
"arg"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L800-L808 |
5,386 | datajoint/datajoint-python | datajoint/utils.py | user_choice | def user_choice(prompt, choices=("yes", "no"), default=None):
"""
Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice
"""
assert default is None or default in choices
choice_list = ', '.join((choice.title() if choice == default else choice for choice in choices))
response = None
while response not in choices:
response = input(prompt + ' [' + choice_list + ']: ')
response = response.lower() if response else default
return response | python | def user_choice(prompt, choices=("yes", "no"), default=None):
"""
Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice
"""
assert default is None or default in choices
choice_list = ', '.join((choice.title() if choice == default else choice for choice in choices))
response = None
while response not in choices:
response = input(prompt + ' [' + choice_list + ']: ')
response = response.lower() if response else default
return response | [
"def",
"user_choice",
"(",
"prompt",
",",
"choices",
"=",
"(",
"\"yes\"",
",",
"\"no\"",
")",
",",
"default",
"=",
"None",
")",
":",
"assert",
"default",
"is",
"None",
"or",
"default",
"in",
"choices",
"choice_list",
"=",
"', '",
".",
"join",
"(",
"(",
"choice",
".",
"title",
"(",
")",
"if",
"choice",
"==",
"default",
"else",
"choice",
"for",
"choice",
"in",
"choices",
")",
")",
"response",
"=",
"None",
"while",
"response",
"not",
"in",
"choices",
":",
"response",
"=",
"input",
"(",
"prompt",
"+",
"' ['",
"+",
"choice_list",
"+",
"']: '",
")",
"response",
"=",
"response",
".",
"lower",
"(",
")",
"if",
"response",
"else",
"default",
"return",
"response"
] | Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice | [
"Prompts",
"the",
"user",
"for",
"confirmation",
".",
"The",
"default",
"value",
"if",
"any",
"is",
"capitalized",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/utils.py#L16-L31 |
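The prompt loop is easy to exercise directly; a sketch:

    # Displays: Proceed to delete? [Yes, no]:  -- the default choice is capitalized.
    response = user_choice('Proceed to delete?', choices=('yes', 'no'), default='yes')
    if response == 'yes':
        print('deleting')
    # An empty reply returns the default; anything else is lowercased and the
    # loop repeats until the reply matches one of the choices.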
5,387 | datajoint/datajoint-python | datajoint/fetch.py | to_dicts | def to_dicts(recarray):
"""convert record array to a dictionaries"""
for rec in recarray:
yield dict(zip(recarray.dtype.names, rec.tolist())) | python | def to_dicts(recarray):
"""convert record array to a dictionaries"""
for rec in recarray:
yield dict(zip(recarray.dtype.names, rec.tolist())) | [
"def",
"to_dicts",
"(",
"recarray",
")",
":",
"for",
"rec",
"in",
"recarray",
":",
"yield",
"dict",
"(",
"zip",
"(",
"recarray",
".",
"dtype",
".",
"names",
",",
"rec",
".",
"tolist",
"(",
")",
")",
")"
] | convert record array to dictionaries | [
"convert",
"record",
"array",
"to",
"a",
"dictionaries"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/fetch.py#L24-L27 |
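A self-contained sketch of the generator on a small NumPy structured array:

    import numpy as np

    recarray = np.array([(1, 2.5), (2, 3.5)], dtype=[('id', 'i4'), ('value', 'f8')])
    for row in to_dicts(recarray):
        print(row)  # {'id': 1, 'value': 2.5}, then {'id': 2, 'value': 3.5}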
5,388 | datajoint/datajoint-python | datajoint/fetch.py | Fetch.keys | def keys(self, **kwargs):
"""
DEPRECATED
Iterator that returns primary keys as a sequence of dicts.
"""
warnings.warn('Use of `rel.fetch.keys()` notation is deprecated. '
'Please use `rel.fetch("KEY")` or `rel.fetch(dj.key)` for equivalent result', stacklevel=2)
yield from self._expression.proj().fetch(as_dict=True, **kwargs) | python | def keys(self, **kwargs):
"""
DEPRECATED
Iterator that returns primary keys as a sequence of dicts.
"""
warnings.warn('Use of `rel.fetch.keys()` notation is deprecated. '
'Please use `rel.fetch("KEY")` or `rel.fetch(dj.key)` for equivalent result', stacklevel=2)
yield from self._expression.proj().fetch(as_dict=True, **kwargs) | [
"def",
"keys",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"'Use of `rel.fetch.keys()` notation is deprecated. '",
"'Please use `rel.fetch(\"KEY\")` or `rel.fetch(dj.key)` for equivalent result'",
",",
"stacklevel",
"=",
"2",
")",
"yield",
"from",
"self",
".",
"_expression",
".",
"proj",
"(",
")",
".",
"fetch",
"(",
"as_dict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")"
] | DEPRECATED
Iterator that returns primary keys as a sequence of dicts. | [
"DEPRECATED",
"Iterator",
"that",
"returns",
"primary",
"keys",
"as",
"a",
"sequence",
"of",
"dicts",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/fetch.py#L131-L138 |
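Since keys() is deprecated, the replacement it points to looks like this (table stands for any hypothetical query expression):

    # Deprecated:  for key in table.fetch.keys(): ...
    keys = table.fetch('KEY')  # list of primary-key dicts, the recommended form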
5,389 | datajoint/datajoint-python | datajoint/hash.py | key_hash | def key_hash(key):
"""
32-byte hash used for lookup of primary keys of jobs
"""
hashed = hashlib.md5()
for k, v in sorted(key.items()):
hashed.update(str(v).encode())
return hashed.hexdigest() | python | def key_hash(key):
"""
32-byte hash used for lookup of primary keys of jobs
"""
hashed = hashlib.md5()
for k, v in sorted(key.items()):
hashed.update(str(v).encode())
return hashed.hexdigest() | [
"def",
"key_hash",
"(",
"key",
")",
":",
"hashed",
"=",
"hashlib",
".",
"md5",
"(",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"key",
".",
"items",
"(",
")",
")",
":",
"hashed",
".",
"update",
"(",
"str",
"(",
"v",
")",
".",
"encode",
"(",
")",
")",
"return",
"hashed",
".",
"hexdigest",
"(",
")"
] | 32-byte hash used for lookup of primary keys of jobs | [
"32",
"-",
"byte",
"hash",
"used",
"for",
"lookup",
"of",
"primary",
"keys",
"of",
"jobs"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/hash.py#L5-L12 |
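Because items are sorted by attribute name before hashing, dictionary order does not matter; a quick sketch:

    assert key_hash({'a': 1, 'b': 2}) == key_hash({'b': 2, 'a': 1})
    # Note that only the values are hashed (in name-sorted order), not the names.
    print(key_hash({'subject_id': 5, 'session': 1}))  # 32-character hex digest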
5,390 | datajoint/datajoint-python | datajoint/connection.py | conn | def conn(host=None, user=None, password=None, init_fun=None, reset=False):
"""
Returns a persistent connection object to be shared by multiple modules.
If the connection is not yet established or reset=True, a new connection is set up.
If connection information is not provided, it is taken from config which takes the
information from dj_local_conf.json. If the password is not specified in that file
datajoint prompts for the password.
:param host: hostname
:param user: mysql user
:param password: mysql password
:param init_fun: initialization function
:param reset: whether the connection should be reset or not
"""
if not hasattr(conn, 'connection') or reset:
host = host if host is not None else config['database.host']
user = user if user is not None else config['database.user']
password = password if password is not None else config['database.password']
if user is None: # pragma: no cover
user = input("Please enter DataJoint username: ")
if password is None: # pragma: no cover
password = getpass(prompt="Please enter DataJoint password: ")
init_fun = init_fun if init_fun is not None else config['connection.init_function']
conn.connection = Connection(host, user, password, init_fun)
return conn.connection | python | def conn(host=None, user=None, password=None, init_fun=None, reset=False):
"""
Returns a persistent connection object to be shared by multiple modules.
If the connection is not yet established or reset=True, a new connection is set up.
If connection information is not provided, it is taken from config which takes the
information from dj_local_conf.json. If the password is not specified in that file
datajoint prompts for the password.
:param host: hostname
:param user: mysql user
:param password: mysql password
:param init_fun: initialization function
:param reset: whether the connection should be reset or not
"""
if not hasattr(conn, 'connection') or reset:
host = host if host is not None else config['database.host']
user = user if user is not None else config['database.user']
password = password if password is not None else config['database.password']
if user is None: # pragma: no cover
user = input("Please enter DataJoint username: ")
if password is None: # pragma: no cover
password = getpass(prompt="Please enter DataJoint password: ")
init_fun = init_fun if init_fun is not None else config['connection.init_function']
conn.connection = Connection(host, user, password, init_fun)
return conn.connection | [
"def",
"conn",
"(",
"host",
"=",
"None",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"init_fun",
"=",
"None",
",",
"reset",
"=",
"False",
")",
":",
"if",
"not",
"hasattr",
"(",
"conn",
",",
"'connection'",
")",
"or",
"reset",
":",
"host",
"=",
"host",
"if",
"host",
"is",
"not",
"None",
"else",
"config",
"[",
"'database.host'",
"]",
"user",
"=",
"user",
"if",
"user",
"is",
"not",
"None",
"else",
"config",
"[",
"'database.user'",
"]",
"password",
"=",
"password",
"if",
"password",
"is",
"not",
"None",
"else",
"config",
"[",
"'database.password'",
"]",
"if",
"user",
"is",
"None",
":",
"# pragma: no cover",
"user",
"=",
"input",
"(",
"\"Please enter DataJoint username: \"",
")",
"if",
"password",
"is",
"None",
":",
"# pragma: no cover",
"password",
"=",
"getpass",
"(",
"prompt",
"=",
"\"Please enter DataJoint password: \"",
")",
"init_fun",
"=",
"init_fun",
"if",
"init_fun",
"is",
"not",
"None",
"else",
"config",
"[",
"'connection.init_function'",
"]",
"conn",
".",
"connection",
"=",
"Connection",
"(",
"host",
",",
"user",
",",
"password",
",",
"init_fun",
")",
"return",
"conn",
".",
"connection"
] | Returns a persistent connection object to be shared by multiple modules.
If the connection is not yet established or reset=True, a new connection is set up.
If connection information is not provided, it is taken from config which takes the
information from dj_local_conf.json. If the password is not specified in that file
datajoint prompts for the password.
:param host: hostname
:param user: mysql user
:param password: mysql password
:param init_fun: initialization function
:param reset: whether the connection should be reset or not | [
"Returns",
"a",
"persistent",
"connection",
"object",
"to",
"be",
"shared",
"by",
"multiple",
"modules",
".",
"If",
"the",
"connection",
"is",
"not",
"yet",
"established",
"or",
"reset",
"=",
"True",
"a",
"new",
"connection",
"is",
"set",
"up",
".",
"If",
"connection",
"information",
"is",
"not",
"provided",
"it",
"is",
"taken",
"from",
"config",
"which",
"takes",
"the",
"information",
"from",
"dj_local_conf",
".",
"json",
".",
"If",
"the",
"password",
"is",
"not",
"specified",
"in",
"that",
"file",
"datajoint",
"prompts",
"for",
"the",
"password",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/connection.py#L20-L44 |
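A usage sketch; the credentials are placeholders, and omitting them falls back on dj.config and interactive prompts:

    import datajoint as dj

    connection = dj.conn(host='localhost', user='demo', password='demo')
    assert dj.conn() is connection  # later calls reuse the persistent object
    fresh = dj.conn(reset=True)     # forces a brand-new connection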
5,391 | datajoint/datajoint-python | datajoint/connection.py | Connection.connect | def connect(self):
"""
Connects to the database server.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '.*deprecated.*')
self._conn = client.connect(
init_command=self.init_fun,
sql_mode="NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,"
"STRICT_ALL_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION",
charset=config['connection.charset'],
**self.conn_info)
self._conn.autocommit(True) | python | def connect(self):
"""
Connects to the database server.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '.*deprecated.*')
self._conn = client.connect(
init_command=self.init_fun,
sql_mode="NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,"
"STRICT_ALL_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION",
charset=config['connection.charset'],
**self.conn_info)
self._conn.autocommit(True) | [
"def",
"connect",
"(",
"self",
")",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"filterwarnings",
"(",
"'ignore'",
",",
"'.*deprecated.*'",
")",
"self",
".",
"_conn",
"=",
"client",
".",
"connect",
"(",
"init_command",
"=",
"self",
".",
"init_fun",
",",
"sql_mode",
"=",
"\"NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,\"",
"\"STRICT_ALL_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\"",
",",
"charset",
"=",
"config",
"[",
"'connection.charset'",
"]",
",",
"*",
"*",
"self",
".",
"conn_info",
")",
"self",
".",
"_conn",
".",
"autocommit",
"(",
"True",
")"
] | Connects to the database server. | [
"Connects",
"to",
"the",
"database",
"server",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/connection.py#L88-L100 |
5,392 | datajoint/datajoint-python | datajoint/connection.py | Connection.start_transaction | def start_transaction(self):
"""
        Starts a transaction.
:raise DataJointError: if there is an ongoing transaction.
"""
if self.in_transaction:
raise DataJointError("Nested connections are not supported.")
self.query('START TRANSACTION WITH CONSISTENT SNAPSHOT')
self._in_transaction = True
logger.info("Transaction started") | python | def start_transaction(self):
"""
        Starts a transaction.
:raise DataJointError: if there is an ongoing transaction.
"""
if self.in_transaction:
raise DataJointError("Nested connections are not supported.")
self.query('START TRANSACTION WITH CONSISTENT SNAPSHOT')
self._in_transaction = True
logger.info("Transaction started") | [
"def",
"start_transaction",
"(",
"self",
")",
":",
"if",
"self",
".",
"in_transaction",
":",
"raise",
"DataJointError",
"(",
"\"Nested connections are not supported.\"",
")",
"self",
".",
"query",
"(",
"'START TRANSACTION WITH CONSISTENT SNAPSHOT'",
")",
"self",
".",
"_in_transaction",
"=",
"True",
"logger",
".",
"info",
"(",
"\"Transaction started\"",
")"
] | Starts a transaction.
:raise DataJointError: if there is an ongoing transaction. | [
"Starts",
"a",
"transaction",
"error",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/connection.py#L185-L195 |
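A sketch of the surrounding transaction lifecycle; commit_transaction and cancel_transaction are assumed to be the counterpart methods on Connection:

    connection = dj.conn()  # assumes datajoint imported as dj and a configured server
    connection.start_transaction()
    try:
        connection.query('INSERT ...')  # placeholder statements, executed atomically
        connection.commit_transaction()
    except Exception:
        connection.cancel_transaction()
        raise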
5,393 | datajoint/datajoint-python | datajoint/settings.py | Config.save_global | def save_global(self, verbose=False):
"""
saves the settings in the global config file
"""
self.save(os.path.expanduser(os.path.join('~', GLOBALCONFIG)), verbose) | python | def save_global(self, verbose=False):
"""
saves the settings in the global config file
"""
self.save(os.path.expanduser(os.path.join('~', GLOBALCONFIG)), verbose) | [
"def",
"save_global",
"(",
"self",
",",
"verbose",
"=",
"False",
")",
":",
"self",
".",
"save",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'~'",
",",
"GLOBALCONFIG",
")",
")",
",",
"verbose",
")"
] | saves the settings in the global config file | [
"saves",
"the",
"settings",
"in",
"the",
"global",
"config",
"file"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/settings.py#L121-L125 |
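A usage sketch; the on-disk filename is whatever the GLOBALCONFIG constant names in the home directory:

    import datajoint as dj

    dj.config['display.limit'] = 25
    dj.config.save_global()  # persists current settings for all future sessions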
5,394 | datajoint/datajoint-python | datajoint/heading.py | Attribute.todict | def todict(self):
"""Convert namedtuple to dict."""
return OrderedDict((name, self[i]) for i, name in enumerate(self._fields)) | python | def todict(self):
"""Convert namedtuple to dict."""
return OrderedDict((name, self[i]) for i, name in enumerate(self._fields)) | [
"def",
"todict",
"(",
"self",
")",
":",
"return",
"OrderedDict",
"(",
"(",
"name",
",",
"self",
"[",
"i",
"]",
")",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"self",
".",
"_fields",
")",
")"
] | Convert namedtuple to dict. | [
"Convert",
"namedtuple",
"to",
"dict",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/heading.py#L20-L22 |
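The same pattern in isolation, with a stand-in namedtuple (the real Attribute has many more fields):

    from collections import OrderedDict, namedtuple

    class Attribute(namedtuple('Attribute', ('name', 'type'))):
        def todict(self):
            return OrderedDict((name, self[i]) for i, name in enumerate(self._fields))

    print(Attribute('session_id', 'int').todict())
    # OrderedDict([('name', 'session_id'), ('type', 'int')])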
5,395 | datajoint/datajoint-python | datajoint/heading.py | Heading.as_dtype | def as_dtype(self):
"""
represent the heading as a numpy dtype
"""
return np.dtype(dict(
names=self.names,
formats=[v.dtype for v in self.attributes.values()])) | python | def as_dtype(self):
"""
represent the heading as a numpy dtype
"""
return np.dtype(dict(
names=self.names,
formats=[v.dtype for v in self.attributes.values()])) | [
"def",
"as_dtype",
"(",
"self",
")",
":",
"return",
"np",
".",
"dtype",
"(",
"dict",
"(",
"names",
"=",
"self",
".",
"names",
",",
"formats",
"=",
"[",
"v",
".",
"dtype",
"for",
"v",
"in",
"self",
".",
"attributes",
".",
"values",
"(",
")",
"]",
")",
")"
] | represent the heading as a numpy dtype | [
"represent",
"the",
"heading",
"as",
"a",
"numpy",
"dtype"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/heading.py#L112-L118 |
5,396 | datajoint/datajoint-python | datajoint/heading.py | Heading.as_sql | def as_sql(self):
"""
represent heading as SQL field list
"""
return ','.join('`%s`' % name if self.attributes[name].sql_expression is None
else '%s as `%s`' % (self.attributes[name].sql_expression, name)
for name in self.names) | python | def as_sql(self):
"""
represent heading as SQL field list
"""
return ','.join('`%s`' % name if self.attributes[name].sql_expression is None
else '%s as `%s`' % (self.attributes[name].sql_expression, name)
for name in self.names) | [
"def",
"as_sql",
"(",
"self",
")",
":",
"return",
"','",
".",
"join",
"(",
"'`%s`'",
"%",
"name",
"if",
"self",
".",
"attributes",
"[",
"name",
"]",
".",
"sql_expression",
"is",
"None",
"else",
"'%s as `%s`'",
"%",
"(",
"self",
".",
"attributes",
"[",
"name",
"]",
".",
"sql_expression",
",",
"name",
")",
"for",
"name",
"in",
"self",
".",
"names",
")"
] | represent heading as SQL field list | [
"represent",
"heading",
"as",
"SQL",
"field",
"list"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/heading.py#L121-L127 |
5,397 | datajoint/datajoint-python | datajoint/heading.py | Heading.join | def join(self, other):
"""
Join two headings into a new one.
It assumes that self and other are headings that share no common dependent attributes.
"""
return Heading(
[self.attributes[name].todict() for name in self.primary_key] +
[other.attributes[name].todict() for name in other.primary_key if name not in self.primary_key] +
[self.attributes[name].todict() for name in self.dependent_attributes if name not in other.primary_key] +
[other.attributes[name].todict() for name in other.dependent_attributes if name not in self.primary_key]) | python | def join(self, other):
"""
Join two headings into a new one.
It assumes that self and other are headings that share no common dependent attributes.
"""
return Heading(
[self.attributes[name].todict() for name in self.primary_key] +
[other.attributes[name].todict() for name in other.primary_key if name not in self.primary_key] +
[self.attributes[name].todict() for name in self.dependent_attributes if name not in other.primary_key] +
[other.attributes[name].todict() for name in other.dependent_attributes if name not in self.primary_key]) | [
"def",
"join",
"(",
"self",
",",
"other",
")",
":",
"return",
"Heading",
"(",
"[",
"self",
".",
"attributes",
"[",
"name",
"]",
".",
"todict",
"(",
")",
"for",
"name",
"in",
"self",
".",
"primary_key",
"]",
"+",
"[",
"other",
".",
"attributes",
"[",
"name",
"]",
".",
"todict",
"(",
")",
"for",
"name",
"in",
"other",
".",
"primary_key",
"if",
"name",
"not",
"in",
"self",
".",
"primary_key",
"]",
"+",
"[",
"self",
".",
"attributes",
"[",
"name",
"]",
".",
"todict",
"(",
")",
"for",
"name",
"in",
"self",
".",
"dependent_attributes",
"if",
"name",
"not",
"in",
"other",
".",
"primary_key",
"]",
"+",
"[",
"other",
".",
"attributes",
"[",
"name",
"]",
".",
"todict",
"(",
")",
"for",
"name",
"in",
"other",
".",
"dependent_attributes",
"if",
"name",
"not",
"in",
"self",
".",
"primary_key",
"]",
")"
] | Join two headings into a new one.
It assumes that self and other are headings that share no common dependent attributes. | [
"Join",
"two",
"headings",
"into",
"a",
"new",
"one",
".",
"It",
"assumes",
"that",
"self",
"and",
"other",
"are",
"headings",
"that",
"share",
"no",
"common",
"dependent",
"attributes",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/heading.py#L271-L280 |
5,398 | datajoint/datajoint-python | datajoint/heading.py | Heading.make_subquery_heading | def make_subquery_heading(self):
"""
Create a new heading with removed attribute sql_expressions.
Used by subqueries, which resolve the sql_expressions.
"""
return Heading(dict(v.todict(), sql_expression=None) for v in self.attributes.values()) | python | def make_subquery_heading(self):
"""
Create a new heading with removed attribute sql_expressions.
Used by subqueries, which resolve the sql_expressions.
"""
return Heading(dict(v.todict(), sql_expression=None) for v in self.attributes.values()) | [
"def",
"make_subquery_heading",
"(",
"self",
")",
":",
"return",
"Heading",
"(",
"dict",
"(",
"v",
".",
"todict",
"(",
")",
",",
"sql_expression",
"=",
"None",
")",
"for",
"v",
"in",
"self",
".",
"attributes",
".",
"values",
"(",
")",
")"
] | Create a new heading with removed attribute sql_expressions.
Used by subqueries, which resolve the sql_expressions. | [
"Create",
"a",
"new",
"heading",
"with",
"removed",
"attribute",
"sql_expressions",
".",
"Used",
"by",
"subqueries",
"which",
"resolve",
"the",
"sql_expressions",
"."
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/heading.py#L282-L287 |
5,399 | datajoint/datajoint-python | datajoint/blob.py | BlobReader.squeeze | def squeeze(self, array):
"""
Simplify the given array as much as possible - squeeze out all singleton
dimensions and also convert a zero dimensional array into array scalar
"""
if not self._squeeze:
return array
array = array.copy()
array = array.squeeze()
if array.ndim == 0:
array = array[()]
return array | python | def squeeze(self, array):
"""
Simplify the given array as much as possible - squeeze out all singleton
dimensions and also convert a zero dimensional array into array scalar
"""
if not self._squeeze:
return array
array = array.copy()
array = array.squeeze()
if array.ndim == 0:
array = array[()]
return array | [
"def",
"squeeze",
"(",
"self",
",",
"array",
")",
":",
"if",
"not",
"self",
".",
"_squeeze",
":",
"return",
"array",
"array",
"=",
"array",
".",
"copy",
"(",
")",
"array",
"=",
"array",
".",
"squeeze",
"(",
")",
"if",
"array",
".",
"ndim",
"==",
"0",
":",
"array",
"=",
"array",
"[",
"(",
")",
"]",
"return",
"array"
] | Simplify the given array as much as possible - squeeze out all singleton
dimensions and also convert a zero dimensional array into array scalar | [
"Simplify",
"the",
"given",
"array",
"as",
"much",
"as",
"possible",
"-",
"squeeze",
"out",
"all",
"singleton",
"dimensions",
"and",
"also",
"convert",
"a",
"zero",
"dimensional",
"array",
"into",
"array",
"scalar"
] | 4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/blob.py#L152-L163 |
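The singleton-stripping behavior in isolation, mirroring the method body minus the self._squeeze guard:

    import numpy as np

    array = np.zeros((1, 3, 1))
    print(array.copy().squeeze().shape)  # (3,) -- singleton axes removed

    scalar = np.array([[5.0]]).squeeze()
    print(scalar.ndim, scalar[()])       # 0 5.0 -- zero-dim array unwrapped via [()]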