# em_stitch/lens_correction/mesh_and_solve_transform.py
# (AllenInstitute/em_stitch): mesh generation and solving utilities
# for lens correction.

import datetime
import logging
import urllib.parse

import cv2
import numpy as np
import renderapi
import scipy.optimize
import triangle
from scipy import sparse
from scipy.spatial import Delaunay

# Names used below but defined elsewhere in the module (not shown in this
# excerpt): MeshLensCorrectionException (exception class) and _uniq
# (returns the unique values of an integer array).
default_logger = logging.getLogger(__name__)  # stand-in for the module logger

def smooth_density_legacy(coords, tile_width, tile_height, n):
    """legacy function to homogenize distribution of points within a
    rectangular area by reducing the number of points within
    n**2 equally-sized bounding boxes to
    the minimum number of points in one of those boxes.

    Parameters
    ----------
    coords : numpy.ndarray
        Nx2 numpy array of coordinates to consider
    tile_width : int
        width of rectangular area containing coords
    tile_height : int
        height of rectangular area containing coords
    n : int
        number of subdivisions into which tile_width and tile_height
        should be divided

    Returns
    -------
    smoothed_coords : numpy.ndarray
        Nx2 numpy array of smoothed subset of input coords
    """
    # first pass: find the minimum point count over the n x n grid cells
    min_count = np.inf
    for i in range(n):
        r = np.arange(i * tile_width / n, (i + 1) * tile_width / n)
        for j in range(n):
            c = np.arange(j * tile_height / n, (j + 1) * tile_height / n)
            ind = np.argwhere(
                (coords[:, 0] >= r.min()) & (coords[:, 0] <= r.max()) &
                (coords[:, 1] >= c.min()) &
                (coords[:, 1] <= c.max())).flatten()
            if ind.size < min_count:
                min_count = ind.size
    # second pass: randomly keep min_count points from each cell
    new_coords = []
    for i in range(n):
        r = np.arange(i * tile_width / n, (i + 1) * tile_width / n)
        for j in range(n):
            c = np.arange(j * tile_height / n, (j + 1) * tile_height / n)
            ind = np.argwhere(
                (coords[:, 0] >= r.min()) & (coords[:, 0] <= r.max()) &
                (coords[:, 1] >= c.min()) &
                (coords[:, 1] <= c.max())).flatten()
            a = np.arange(ind.size)
            np.random.shuffle(a)
            ind = ind[a[0:min_count]]
            new_coords.append(coords[ind])
    return np.concatenate(new_coords)

def get_bboxes(tile_width, tile_height, n):
    """get list of bounds for n**2 equally-sized bounding boxes within a
    rectangular bounding box

    Parameters
    ----------
    tile_width : int
        width of rectangular area to divide
    tile_height : int
        height of rectangular area to divide
    n : int
        number of subdivisions into which tile_width and tile_height
        should be divided

    Returns
    -------
    vtxs : list of tuple of numpy.ndarray
        list of min/max tuples of vertices representing bounding boxes
    """
    numX = n
    numY = n
    diffX = (tile_width - 1) / numX
    diffY = (tile_height - 1) / numY
    # grid of candidate box anchor points spanning the tile
    squaremesh = np.mgrid[
        0:(tile_width - 1):(numX * 1j),
        0:(tile_height - 1):(numY * 1j)].reshape(2, -1).T
    maxpt = squaremesh.max(axis=0)
    vtxs = []
    for pt in squaremesh:
        # skip anchors on the far edges; no box extends inward from them
        if np.any(pt == maxpt):
            continue
        vtxs.append((pt, pt + np.array([diffX, diffY])))
    return vtxs

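# --- illustrative usage sketch (not in the original module) ---
# Enumerate the boxes get_bboxes produces for a small tile. The tile size
# and subdivision count are arbitrary demo values; each entry pairs a
# lower-left anchor with an upper-right corner offset by (diffX, diffY).
def _demo_get_bboxes():
    boxes = get_bboxes(tile_width=100, tile_height=100, n=4)
    for ll, ur in boxes:
        print('lower-left %s upper-right %s' % (ll, ur))
    print('%d boxes' % len(boxes))
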
def smooth_density_bbox(coords, tile_width, tile_height, n):
    """homogenize distribution of points within a rectangular area by reducing
    the number of points within n**2 equally-sized bounding boxes to
    the minimum number of points in one of those boxes.

    Parameters
    ----------
    coords : numpy.ndarray
        Nx2 numpy array of coordinates to consider
    tile_width : int
        width of rectangular area containing coords
    tile_height : int
        height of rectangular area containing coords
    n : int
        number of subdivisions into which tile_width and tile_height
        should be divided

    Returns
    -------
    smoothed_coords : numpy.ndarray
        Nx2 numpy array of smoothed subset of input coords
    """
    vtxs = get_bboxes(tile_width, tile_height, n)
    # label each coordinate with the 1-based index of a box containing it
    # (0 means the point fell in no box)
    index_arr = np.zeros(coords.shape[0], dtype='int64')
    for ei, (ll, ur) in enumerate(vtxs):
        vi = ei + 1
        index_arr[np.all((ll <= coords) & (ur >= coords), axis=1)] = vi
    bc = np.bincount(index_arr)
    mincount = bc[1:].min()
    idxs = _uniq(index_arr)  # unique labels; helper defined elsewhere in module
    # note: np.random.choice samples with replacement by default, so the
    # result can contain repeated points
    new_coords = np.concatenate(
        [coords[np.random.choice(
            np.argwhere(index_arr == idx).flatten(), mincount)]
         for idx in idxs])
    return new_coords

def smooth_density(coords, tile_width, tile_height, n,
                   legacy_smooth_density=False, **kwargs):
    """homogenize distribution of points within a rectangular area by reducing
    the number of points within n**2 equally-sized bounding boxes to
    the minimum number of points in one of those boxes.

    Parameters
    ----------
    coords : numpy.ndarray
        Nx2 numpy array of coordinates to consider
    tile_width : int
        width of rectangular area containing coords
    tile_height : int
        height of rectangular area containing coords
    n : int
        number of subdivisions into which tile_width and tile_height
        should be divided
    legacy_smooth_density : boolean
        whether to use (slower) legacy code. Not recommended.

    Returns
    -------
    smoothed_coords : numpy.ndarray
        Nx2 numpy array of smoothed subset of input coords
    """
    if legacy_smooth_density:
        return smooth_density_legacy(coords, tile_width, tile_height, n)
    else:
        return smooth_density_bbox(coords, tile_width, tile_height, n)

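# --- illustrative usage sketch (not in the original module) ---
# Smooth a deliberately clustered point set. All values are demo choices;
# the legacy path is used here so the sketch depends only on code shown in
# this excerpt (the bbox path also needs the _uniq helper).
def _demo_smooth_density():
    np.random.seed(42)
    uniform = np.random.uniform(0, 1000, size=(500, 2))
    cluster = np.random.uniform(0, 200, size=(500, 2))  # dense corner
    coords = np.concatenate([uniform, cluster])
    smoothed = smooth_density(coords, tile_width=1000, tile_height=1000,
                              n=4, legacy_smooth_density=True)
    print('%d points -> %d points' % (coords.shape[0], smoothed.shape[0]))
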
def approx_snap_contour(contour, width, height, epsilon=20, snap_dist=5):
    """Approximate a contour within a number of pixels, so it isn't too
    fine in the corner and snap to edges

    Parameters
    ----------
    contour : numpy.ndarray
        Nx2 array input to cv2.approxPolyDP
    width : int
        width to which approximated values are snapped
    height : int
        height to which approximated values are snapped
    epsilon : int
        maximum pixel distance between the original curve and its
        approximation
    snap_dist : int
        maximum distance within which approximated vertices snap to
        the 0/width and 0/height edges

    Returns
    -------
    approx : numpy.ndarray
        Polygon approximating the contour
    """
    approx = cv2.approxPolyDP(contour, epsilon, True)
    for i in range(approx.shape[0]):
        for j in [0, width]:
            if np.abs(approx[i, 0, 0] - j) <= snap_dist:
                approx[i, 0, 0] = j
        for j in [0, height]:
            if np.abs(approx[i, 0, 1] - j) <= snap_dist:
                approx[i, 0, 1] = j
    return approx

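# --- illustrative usage sketch (not in the original module) ---
# Snap a synthetic contour whose corners sit 3 px inside a 640x480 tile;
# with the default snap_dist=5 they land exactly on the tile edges. The
# contour uses the (N, 1, 2) int32 layout cv2.findContours produces.
def _demo_approx_snap_contour():
    contour = np.array(
        [[[3, 3]], [[3, 477]], [[637, 477]], [[637, 3]]], dtype=np.int32)
    approx = approx_snap_contour(contour, width=640, height=480)
    print(approx.squeeze())  # corners snapped to 0/640 and 0/480
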
def create_PSLG(tile_width, tile_height, maskUrl):
    """create a PSLG (Planar Straight Line Graph) based on a masked image

    Parameters
    ----------
    tile_width : int
        width of tile for which PSLG will be created
    tile_height : int
        height of tile for which PSLG will be created
    maskUrl : str or None
        file uri to binary mask image

    Returns
    -------
    bbox : dict
        dictionary with keys vertices and segments representing the PSLG
    """
    if maskUrl is None:
        # no mask: the PSLG is just the tile rectangle
        vertices = np.array([
            [0, 0],
            [0, tile_height],
            [tile_width, tile_height],
            [tile_width, 0]])
        segments = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])
    else:
        mpath = urllib.parse.unquote(urllib.parse.urlparse(maskUrl).path)
        im = cv2.imread(mpath, 0)
        # note: 3-value unpacking matches the OpenCV 3.x findContours API
        (_, contours, _) = cv2.findContours(
            im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        approx = approx_snap_contour(contours[0], tile_width, tile_height)
        vertices = np.array(approx).squeeze()
        # close the polygon: segment i joins vertex i to vertex i+1,
        # and the last segment wraps back to vertex 0
        segments = np.zeros((vertices.shape[0], 2))
        segments[:, 0] = np.arange(vertices.shape[0])
        segments[:, 1] = segments[:, 0] + 1
        segments[-1, 1] = 0
    bbox = {}
    bbox['vertices'] = vertices
    bbox['segments'] = segments
    return bbox

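# --- illustrative usage sketch (not in the original module) ---
# With maskUrl=None the PSLG is just the tile rectangle: four vertices and
# four segments closing the loop. The tile size is an arbitrary demo value.
def _demo_create_PSLG():
    bbox = create_PSLG(3840, 3840, None)
    print(bbox['vertices'])
    print(bbox['segments'])
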
def calculate_mesh(a, bbox, target, get_t=False):
    """triangulate a mesh based on a Planar Straight Line Graph with
    mesh constraints. Return either the Delaunay triangulation or a
    difference in number of triangles from a target.

    Parameters
    ----------
    a : float
        global maximum triangular area constraint
    bbox : dict
        dictionary with keys vertices and segments representing PSLG
    target : int
        number of triangles to which the triangulation will be compared if
        get_t is False
    get_t : boolean
        whether to return a Delaunay Triangulation rather than a
        comparison to target

    Returns
    -------
    val : scipy.spatial.Delaunay or int
        Delaunay triangulation if get_t is True, otherwise the number of
        vertices in the triangulation subtracted from the target
    """
    t = triangle.triangulate(bbox, 'pqa%0.1f' % a)
    if get_t:
        return Delaunay(t['vertices'])
    return target - len(t['vertices'])

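# --- illustrative usage sketch (not in the original module) ---
# Triangulate the rectangle PSLG two ways: once returning the Delaunay
# object, once returning the distance from a vertex-count target. The area
# constraint and target are arbitrary demo values.
def _demo_calculate_mesh():
    bbox = create_PSLG(1000, 1000, None)
    mesh = calculate_mesh(50000.0, bbox, None, get_t=True)
    print('%d vertices, %d simplices' % (mesh.npoints, mesh.nsimplex))
    print('target - vertex count: %d' % calculate_mesh(50000.0, bbox, 100))
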
def force_vertices_with_npoints(area_par, bbox, coords, npts, **kwargs):
    """create a triangular mesh which iteratively attempts to conform to a
    minimum number of points per vertex by adjusting the maximum
    triangle area

    Parameters
    ----------
    area_par : float
        initial maximum triangle area constraint for triangle.triangulate
    bbox : dict
        PSLG bounding box dictionary from :func:create_PSLG
    coords : numpy.ndarray
        Nx2 points
    npts : int
        minimum number of points near each vertex

    Returns
    -------
    t : scipy.spatial.qhull.Delaunay
        triangle mesh with minimum point count near vertices
    area_par : float
        area parameter used to calculate result t
    """
    fac = 1.02
    count = 0
    max_iter = 20
    while True:
        t = calculate_mesh(area_par, bbox, None, get_t=True)
        pt_count = count_points_near_vertices(t, coords, **kwargs)
        if pt_count.min() >= npts:
            break
        # grow the area constraint (coarser mesh) and try again
        area_par *= fac
        count += 1
        if np.mod(count, 2) == 0:
            fac += 0.5
        if np.mod(count, max_iter) == 0:
            e = ('did not meet vertex requirement '
                 'after %d iterations' % max_iter)
            raise MeshLensCorrectionException(e)
    return t, area_par

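# --- illustrative usage sketch (not in the original module) ---
# Coarsen a mesh until every vertex has at least npts nearby points. The
# starting area constraint and point cloud are arbitrary demo values;
# points stay strictly inside the tile so they all land inside the hull.
def _demo_force_vertices_with_npoints():
    np.random.seed(0)
    coords = np.random.uniform(1, 999, size=(2000, 2))
    bbox = create_PSLG(1000, 1000, None)
    t, area_par = force_vertices_with_npoints(20000.0, bbox, coords, npts=3)
    print('%d vertices, final area constraint %0.1f' % (t.npoints, area_par))
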
def find_delaunay_with_max_vertices(bbox, nvertex):
    """optimize a delaunay triangulation of a PSLG to create an
    expected number of vertices

    Parameters
    ----------
    bbox : dict
        dictionary with keys vertices and segments representing a PSLG
    nvertex : int
        number of vertices for the triangulation to target

    Returns
    -------
    mesh : scipy.spatial.Delaunay
        resultant triangulation
    a : float
        area constraint used in the optimized triangulation
    """
    # find bracketing area values with opposite-signed vertex-count error
    a1 = a2 = 1000000.0
    t1 = calculate_mesh(a1, bbox, nvertex)
    afac = np.power(10.0, -np.sign(t1))
    while np.sign(t1) == np.sign(calculate_mesh(a2, bbox, nvertex)):
        a2 *= afac
    # root-find, lowering the target until the count is at or under nvertex
    val_at_root = -1
    nvtweak = nvertex
    while val_at_root < 0:
        a = scipy.optimize.brentq(
            calculate_mesh, a1, a2, args=(bbox, nvtweak))
        val_at_root = calculate_mesh(a, bbox, nvertex)
        a1 = a * 2
        a2 = a * 0.5
        nvtweak -= 1
    mesh = calculate_mesh(a, bbox, None, get_t=True)
    return mesh, a

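# --- illustrative usage sketch (not in the original module) ---
# Ask for a triangulation with at most ~100 vertices; the bracketing plus
# brentq root find above adjusts the area constraint until the vertex
# count is at or just under the target. The target is a demo value.
def _demo_find_delaunay_with_max_vertices():
    bbox = create_PSLG(1000, 1000, None)
    mesh, a = find_delaunay_with_max_vertices(bbox, nvertex=100)
    print('%d vertices for target 100, area constraint %0.1f' % (
        mesh.npoints, a))
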
def compute_barycentrics_legacy(coords, mesh):
    """legacy function to compute barycentric coordinates on mesh

    Parameters
    ----------
    coords : numpy.ndarray
        Nx2 array of points
    mesh : scipy.spatial.qhull.Delaunay
        triangular mesh

    Returns
    -------
    bcoords : numpy.ndarray
        Nx2 array of barycentric coordinates
    triangle_indices : numpy.ndarray
        simplex indices of barycentric coordinates
    """
    triangle_indices = mesh.find_simplex(coords)
    # homogeneous coordinates of mesh vertices and query points
    vt = np.vstack((np.transpose(mesh.points), np.ones(mesh.points.shape[0])))
    mt = np.vstack((np.transpose(coords), np.ones(coords.shape[0])))
    bary = np.zeros((3, coords.shape[0]))
    Rinv = []
    for tri in mesh.simplices:
        Rinv.append(np.linalg.inv(vt[:, tri]))
    for i in range(mesh.nsimplex):
        ind = np.argwhere(triangle_indices == i).flatten()
        bary[:, ind] = Rinv[i].dot(mt[:, ind])
    return np.transpose(bary), triangle_indices

def compute_barycentrics_native(coords, mesh):
    """convert coordinates to barycentric coordinates on mesh

    Parameters
    ----------
    coords : numpy.ndarray
        Nx2 array of points
    mesh : scipy.spatial.qhull.Delaunay
        triangular mesh

    Returns
    -------
    bcoords : numpy.ndarray
        Nx2 array of barycentric coordinates
    triangle_indices : numpy.ndarray
        simplex indices of barycentric coordinates
    """
    triangle_indices = mesh.find_simplex(coords)
    # use scipy's precomputed affine transform to barycentric coordinates:
    # b = T (x - r), with the last coordinate given by 1 - sum(b)
    X = mesh.transform[triangle_indices, :2]
    Y = coords - mesh.transform[triangle_indices, 2]
    b = np.einsum('ijk,ik->ij', X, Y)
    bcoords = np.c_[b, 1 - b.sum(axis=1)]
    return bcoords, triangle_indices

def compute_barycentrics(coords, mesh, legacy_barycentrics=False, **kwargs):
    """convert coordinates to barycentric coordinates on mesh

    Parameters
    ----------
    coords : numpy.ndarray
        Nx2 array of points
    mesh : scipy.spatial.qhull.Delaunay
        triangular mesh
    legacy_barycentrics : boolean
        whether to use (slower) legacy method to find barycentrics.

    Returns
    -------
    bcoords : numpy.ndarray
        Nx2 array of barycentric coordinates
    triangle_indices : numpy.ndarray
        simplex indices of barycentric coordinates
    """
    if legacy_barycentrics:
        return compute_barycentrics_legacy(coords, mesh)
    else:
        return compute_barycentrics_native(coords, mesh)

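# --- illustrative usage sketch (not in the original module) ---
# Round-trip check: barycentric coordinates weighted against the vertices
# of their containing simplex reconstruct the original points. Mesh and
# points are arbitrary demo values; points stay inside the hull so
# find_simplex never returns -1.
def _demo_compute_barycentrics():
    bbox = create_PSLG(1000, 1000, None)
    mesh = calculate_mesh(50000.0, bbox, None, get_t=True)
    np.random.seed(1)
    coords = np.random.uniform(1, 999, size=(10, 2))
    bary, tri = compute_barycentrics(coords, mesh)
    recon = np.einsum('ij,ijk->ik', bary, mesh.points[mesh.simplices[tri]])
    print(np.allclose(recon, coords))  # True
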
def count_points_near_vertices(t, coords, bruteforce_simplex_counts=False,
                               count_bincount=True, **kwargs):
    """enumerate coordinates closest to the vertices in a mesh

    Parameters
    ----------
    t : scipy.spatial.qhull.Delaunay
        triangular mesh
    coords : numpy.ndarray
        Nx2 array of points to assign to vertices on t
    bruteforce_simplex_counts : boolean
        whether to do a bruteforce simplex finding
    count_bincount : boolean
        use numpy.bincount based counting rather than legacy counting

    Returns
    -------
    pt_count : numpy.ndarray
        array with counts of points corresponding to indices of vertices in t
    """
    # for each vertex, the indices of the simplices that touch it
    flat_tri = t.simplices.flatten()
    flat_ind = np.repeat(np.arange(t.nsimplex), 3)
    v_touches = []
    for i in range(t.npoints):
        v_touches.append(flat_ind[np.argwhere(flat_tri == i)])
    found = t.find_simplex(coords, bruteforce=bruteforce_simplex_counts)
    if count_bincount:
        bc = np.bincount(found, minlength=t.nsimplex)
        pt_count = np.array([bc[v_touches[i]].sum()
                             for i in range(t.npoints)])
    else:
        pt_count = np.zeros(t.npoints)
        for i in range(t.npoints):
            for j in v_touches[i]:
                pt_count[i] += np.count_nonzero(found == j)
    return pt_count

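# --- illustrative usage sketch (not in the original module) ---
# Conservation check: each interior point is tallied once per vertex of
# its containing simplex, so the counts sum to 3x the number of points.
# Points stay inside the hull so find_simplex never returns -1 (which
# would break np.bincount).
def _demo_count_points_near_vertices():
    bbox = create_PSLG(1000, 1000, None)
    t = calculate_mesh(50000.0, bbox, None, get_t=True)
    np.random.seed(2)
    coords = np.random.uniform(1, 999, size=(500, 2))
    counts = count_points_near_vertices(t, coords)
    print(counts.sum() == 3 * coords.shape[0])  # True
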
6e241c010f03aa15302360768c6e592ef447ef70c96db901650d8ca9a9abbd66 | def create_regularization(ncols, ntiles, defaultL, transL, lensL):
'create sparse regularization matrix with distinct\n translation and lens regularization factors\n\n Parameters\n ----------\n ncols : int\n columns to generate (based on solver matrix)\n ntiles : int\n number of tiles to regularize\n defaultL : float\n default regularization parameter lambda\n transL : float\n translation-only regularization factor applied to default\n regularization parameter\n lensL : float\n nonlinear-only regularization factor applied to default\n regularization parameter\n\n Returns\n -------\n rmat : :class:`scipy.sparse.csr`\n sparse regularization matrix\n '
reg = (np.ones(ncols).astype('float64') * defaultL)
reg[0:(ntiles * 3)] *= transL
reg[(ntiles * 3):] *= lensL
rmat = sparse.eye(reg.size, dtype='float64', format='csr')
rmat.data = reg
return rmat | create sparse regularization matrix with distinct
translation and lens regularization factors
Parameters
----------
ncols : int
columns to generate (based on solver matrix)
ntiles : int
number of tiles to regularize
defaultL : float
default regularization parameter lambda
transL : float
translation-only regularization factor applied to default
regularization parameter
lensL : float
nonlinear-only regularization factor applied to default
regularization parameter
Returns
-------
rmat : :class:`scipy.sparse.csr`
sparse regularization matrix | em_stitch/lens_correction/mesh_and_solve_transform.py | create_regularization | AllenInstitute/em_stitch | 2 | python | def create_regularization(ncols, ntiles, defaultL, transL, lensL):
'create sparse regularization matrix with distinct\n translation and lens regularization factors\n\n Parameters\n ----------\n ncols : int\n columns to generate (based on solver matrix)\n ntiles : int\n number of tiles to regularize\n defaultL : float\n default regularization parameter lambda\n transL : float\n translation-only regularization factor applied to default\n regularization parameter\n lensL : float\n nonlinear-only regularization factor applied to default\n regularization parameter\n\n Returns\n -------\n rmat : :class:`scipy.sparse.csr`\n sparse regularization matrix\n '
reg = (np.ones(ncols).astype('float64') * defaultL)
reg[0:(ntiles * 3)] *= transL
reg[(ntiles * 3):] *= lensL
rmat = sparse.eye(reg.size, dtype='float64', format='csr')
rmat.data = reg
return rmat | def create_regularization(ncols, ntiles, defaultL, transL, lensL):
'create sparse regularization matrix with distinct\n translation and lens regularization factors\n\n Parameters\n ----------\n ncols : int\n columns to generate (based on solver matrix)\n ntiles : int\n number of tiles to regularize\n defaultL : float\n default regularization parameter lambda\n transL : float\n translation-only regularization factor applied to default\n regularization parameter\n lensL : float\n nonlinear-only regularization factor applied to default\n regularization parameter\n\n Returns\n -------\n rmat : :class:`scipy.sparse.csr`\n sparse regularization matrix\n '
reg = (np.ones(ncols).astype('float64') * defaultL)
reg[0:(ntiles * 3)] *= transL
reg[(ntiles * 3):] *= lensL
rmat = sparse.eye(reg.size, dtype='float64', format='csr')
rmat.data = reg
return rmat<|docstring|>create sparse regularization matrix with distinct
translation and lens regularization factors
Parameters
----------
ncols : int
columns to generate (based on solver matrix)
ntiles : int
number of tiles to regularize
defaultL : float
default regularization parameter lambda
transL : float
translation-only regularization factor applied to default
regularization parameter
lensL : float
nonlinear-only regularization factor applied to default
regularization parameter
Returns
-------
rmat : :class:`scipy.sparse.csr`
sparse regularization matrix<|endoftext|> |
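Usage sketch (illustrative lambda values, not recommendations) showing the split between translation and lens factors on the diagonal:
from em_stitch.lens_correction.mesh_and_solve_transform import create_regularization

ntiles = 4
ncols = ntiles * 3 + 100    # translation DOFs plus, say, 100 lens vertices
rmat = create_regularization(ncols, ntiles, defaultL=1e5, transL=1e-5, lensL=100.0)
print(rmat.shape)                      # (ncols, ncols), diagonal CSR matrix
print(rmat.data[0], rmat.data[-1])     # defaultL*transL vs. defaultL*lensL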
af028e487807d5b030dec6b33b95cf89115daca1cc96e17901a64ec1acbc64d9 | def create_thinplatespline_tf(mesh, solution, lens_dof_start, logger=default_logger, compute_affine=False):
'create 2D Thin Plate Spline transform required to transform mesh to\n the solution derived from\n em_stitch.lens_correction.mesh_and_solve_transform.solve\n\n Parameters\n ----------\n mesh : scipy.spatial.qhull.Delaunay\n triangular source mesh object\n solution : list of numpy.ndarray\n list of numpy arrays of x and y vertex positions of the solution\n lens_dof_start : int\n start index describing degrees of freedom used in create_A\n logger : logging.Logger, optional\n logger used in this method\n compute_affine : bool\n Whether to compute an affine in Thin Plate Spline estimation.\n See renderapi.transform.ThinPlateSplineTransform\n\n Returns\n -------\n transform : renderapi.transform.ThinPlateSplineTransform\n transformation object deforming the mesh to the solution\n '
dst = np.zeros_like(mesh.points)
dst[(:, 0)] = (mesh.points[(:, 0)] + solution[0][lens_dof_start:])
dst[(:, 1)] = (mesh.points[(:, 1)] + solution[1][lens_dof_start:])
transform = renderapi.transform.ThinPlateSplineTransform()
transform.estimate(mesh.points, dst, computeAffine=compute_affine)
npts0 = transform.srcPts.shape[1]
transform = transform.adaptive_mesh_estimate(max_iter=1000)
npts1 = transform.srcPts.shape[1]
logger.info(('adaptive_mesh_estimate reduced control points from %d to %d' % (npts0, npts1)))
transform.transformId = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')[:(- 3)]
transform.labels = None
return transform | create 2D Thin Plate Spline transform required to transform mesh to
the solution derived from
em_stitch.lens_correction.mesh_and_solve_transform.solve
Parameters
----------
mesh : scipy.spatial.qhull.Delaunay
triangular source mesh object
solution : list of numpy.ndarray
list of numpy arrays of x and y vertex positions of the solution
lens_dof_start : int
start index describing degrees of freedom used in create_A
logger : logging.Logger, optional
logger used in this method
compute_affine : bool
Whether to compute an affine in Thin Plate Spline estimation.
See renderapi.transform.ThinPlateSplineTransform
Returns
-------
transform : renderapi.transform.ThinPlateSplineTransform
transformation object deforming the mesh to the solution | em_stitch/lens_correction/mesh_and_solve_transform.py | create_thinplatespline_tf | AllenInstitute/em_stitch | 2 | python | def create_thinplatespline_tf(mesh, solution, lens_dof_start, logger=default_logger, compute_affine=False):
'create 2D Thin Plate Spline transform required to transform mesh to\n the solution derived from\n em_stitch.lens_correction.mesh_and_solve_transform.solve\n\n Parameters\n ----------\n mesh : scipy.spatial.qhull.Delaunay\n triangular source mesh object\n solution : list of numpy.ndarray\n list of numpy arrays of x and y vertex positions of the solution\n lens_dof_start : int\n start index describing degrees of freedom used in create_A\n logger : logging.Logger, optional\n logger used in this method\n compute_affine : bool\n Whether to compute an affine in Thin Plate Spline estimation.\n See renderapi.transform.ThinPlateSplineTransform\n\n Returns\n -------\n transform : renderapi.transform.ThinPlateSplineTransform\n transformation object deforming the mesh to the solution\n '
dst = np.zeros_like(mesh.points)
dst[(:, 0)] = (mesh.points[(:, 0)] + solution[0][lens_dof_start:])
dst[(:, 1)] = (mesh.points[(:, 1)] + solution[1][lens_dof_start:])
transform = renderapi.transform.ThinPlateSplineTransform()
transform.estimate(mesh.points, dst, computeAffine=compute_affine)
npts0 = transform.srcPts.shape[1]
transform = transform.adaptive_mesh_estimate(max_iter=1000)
npts1 = transform.srcPts.shape[1]
logger.info(('adaptive_mesh_estimate reduced control points from %d to %d' % (npts0, npts1)))
transform.transformId = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')[:(- 3)]
transform.labels = None
return transform | def create_thinplatespline_tf(mesh, solution, lens_dof_start, logger=default_logger, compute_affine=False):
'create 2D Thin Plate Spline transform required to transform mesh to\n the solution derived from\n em_stitch.lens_correction.mesh_and_solve_transform.solve\n\n Parameters\n ----------\n mesh : scipy.spatial.qhull.Delaunay\n triangular source mesh object\n solution : list of numpy.ndarray\n list of numpy arrays of x and y vertex positions of the solution\n lens_dof_start : int\n start index describing degrees of freedom used in create_A\n logger : logging.Logger, optional\n logger used in this method\n compute_affine : bool\n Whether to compute an affine in Thin Plate Spline estimation.\n See renderapi.transform.ThinPlateSplineTransform\n\n Returns\n -------\n transform : renderapi.transform.ThinPlateSplineTransform\n transformation object deforming the mesh to the solution\n '
dst = np.zeros_like(mesh.points)
dst[(:, 0)] = (mesh.points[(:, 0)] + solution[0][lens_dof_start:])
dst[(:, 1)] = (mesh.points[(:, 1)] + solution[1][lens_dof_start:])
transform = renderapi.transform.ThinPlateSplineTransform()
transform.estimate(mesh.points, dst, computeAffine=compute_affine)
npts0 = transform.srcPts.shape[1]
transform = transform.adaptive_mesh_estimate(max_iter=1000)
npts1 = transform.srcPts.shape[1]
logger.info(('adaptive_mesh_estimate reduced control points from %d to %d' % (npts0, npts1)))
transform.transformId = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')[:(- 3)]
transform.labels = None
return transform<|docstring|>create 2D Thin Plate Spline transform required to transform mesh to
the solution derived from
em_stitch.lens_correction.mesh_and_solve_transform.solve
Parameters
----------
mesh : scipy.spatial.qhull.Delaunay
triangular source mesh object
solution : list of numpy.ndarray
list of numpy arrays of x and y vertex positions of the solution
lens_dof_start : int
start index describing degrees of freedom used in create_A
logger : logging.Logger, optional
logger used in this method
compute_affine : bool
Whether to compute an affine in Thin Plate Spline estimation.
See renderapi.transform.ThinPlateSplineTransform
Returns
-------
transform : renderapi.transform.ThinPlateSplineTransform
transformation object deforming the mesh to the solution<|endoftext|> |
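Hedged sketch of a direct call with synthetic inputs; in the real pipeline mesh, solution and lens_dof_start come from _create_mesh and solve, and the small random displacements below merely stand in for a lens field:
import numpy as np
from scipy.spatial import Delaunay
from em_stitch.lens_correction.mesh_and_solve_transform import create_thinplatespline_tf

mesh = Delaunay(np.random.rand(30, 2) * 100.0)
nvert = mesh.points.shape[0]
lens_dof_start = 2      # pretend two tiles with one translation DOF each
solution = [np.concatenate([np.zeros(lens_dof_start), 0.5 * np.random.randn(nvert)])
            for _ in range(2)]
tps = create_thinplatespline_tf(mesh, solution, lens_dof_start)
print(tps.transformId, tps.srcPts.shape)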
361b20d4432139a23de7a3680c7388d5e65a5098368f96de5ca51ac38ed10464 | def new_specs_with_tf(ref_transform, tilespecs, transforms):
'create a copy of each tilespec in tilespecs with the first transform\n being a reference to ref_transform and the second based on transforms\n provided in transforms.\n This likely expects a single transformation in the input tilespecs.\n\n Parameters\n ----------\n ref_transform : renderapi.transform.Transform\n transform to attach as a reference transform at index 0 for\n each tilespec\n tilespecs : list of renderapi.tilespec.TileSpec\n tilespecs to which ref_transform and the corresponding tranform\n from transforms should be applied\n transforms : list of renderapi.transform.Transform\n list of transforms of same length as tilespecs to apply as transform at index 1 of each tilespec\n\n Returns\n -------\n newspecs : list of renderapi.tilespec.TileSpec\n copied tilespecs with transformations applied\n '
newspecs = []
for i in range(len(tilespecs)):
newspecs.append(copy.deepcopy(tilespecs[i]))
newspecs[(- 1)].tforms.insert(0, renderapi.transform.ReferenceTransform(refId=ref_transform.transformId))
newspecs[(- 1)].tforms[1] = transforms[i]
return newspecs | create a copy of each tilespec in tilespecs with the first transform
being a reference to ref_transform and the second based on transforms
provided in transforms.
This likely expects a single transformation in the input tilespecs.
Parameters
----------
ref_transform : renderapi.transform.Transform
transform to attach as a reference transform at index 0 for
each tilespec
tilespecs : list of renderapi.tilespec.TileSpec
tilespecs to which ref_transform and the corresponding transform
from transforms should be applied
transforms : list of renderapi.transform.Transform
list of transforms, of the same length as tilespecs, to apply as the transform at index 1 of each tilespec
Returns
-------
newspecs : list of renderapi.tilespec.TileSpec
copied tilespecs with transformations applied | em_stitch/lens_correction/mesh_and_solve_transform.py | new_specs_with_tf | AllenInstitute/em_stitch | 2 | python | def new_specs_with_tf(ref_transform, tilespecs, transforms):
'create a copy of each tilespec in tilespecs with the first transform\n being a reference to ref_transform and the second based on transforms\n provided in transforms.\n This likely expects a single transformation in the input tilespecs.\n\n Parameters\n ----------\n ref_transform : renderapi.transform.Transform\n transform to attach as a reference transform at index 0 for\n each tilespec\n tilespecs : list of renderapi.tilespec.TileSpec\n tilespecs to which ref_transform and the corresponding tranform\n from transforms should be applied\n transforms : list of renderapi.transform.Transform\n list of transforms of same length as tilespecs to apply as transform at index 1 of each tilespec\n\n Returns\n -------\n newspecs : list of renderapi.tilespec.TileSpec\n copied tilespecs with transformations applied\n '
newspecs = []
for i in range(len(tilespecs)):
newspecs.append(copy.deepcopy(tilespecs[i]))
newspecs[(- 1)].tforms.insert(0, renderapi.transform.ReferenceTransform(refId=ref_transform.transformId))
newspecs[(- 1)].tforms[1] = transforms[i]
return newspecs | def new_specs_with_tf(ref_transform, tilespecs, transforms):
'create a copy of each tilespec in tilespecs with the first transform\n being a reference to ref_transform and the second based on transforms\n provided in transforms.\n This likely expects a single transformation in the input tilespecs.\n\n Parameters\n ----------\n ref_transform : renderapi.transform.Transform\n transform to attach as a reference transform at index 0 for\n each tilespec\n tilespecs : list of renderapi.tilespec.TileSpec\n tilespecs to which ref_transform and the corresponding tranform\n from transforms should be applied\n transforms : list of renderapi.transform.Transform\n list of transforms of same length as tilespecs to apply as transform at index 1 of each tilespec\n\n Returns\n -------\n newspecs : list of renderapi.tilespec.TileSpec\n copied tilespecs with transformations applied\n '
newspecs = []
for i in range(len(tilespecs)):
newspecs.append(copy.deepcopy(tilespecs[i]))
newspecs[(- 1)].tforms.insert(0, renderapi.transform.ReferenceTransform(refId=ref_transform.transformId))
newspecs[(- 1)].tforms[1] = transforms[i]
return newspecs<|docstring|>create a copy of each tilespec in tilespecs with the first transform
being a reference to ref_transform and the second based on transforms
provided in transforms.
This likely expects a single transformation in the input tilespecs.
Parameters
----------
ref_transform : renderapi.transform.Transform
transform to attach as a reference transform at index 0 for
each tilespec
tilespecs : list of renderapi.tilespec.TileSpec
tilespecs to which ref_transform and the corresponding transform
from transforms should be applied
transforms : list of renderapi.transform.Transform
list of transforms, of the same length as tilespecs, to apply as the transform at index 1 of each tilespec
Returns
-------
newspecs : list of renderapi.tilespec.TileSpec
copied tilespecs with transformations applied<|endoftext|> |
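Toy example of the re-wiring this performs; the tilespecs are minimal stand-ins and an AffineModel plays the lens-reference role normally taken by the thin plate spline:
import renderapi
from em_stitch.lens_correction.mesh_and_solve_transform import new_specs_with_tf

ref = renderapi.transform.AffineModel()    # stand-in for the lens reference
ref.transformId = 'lens_ref_demo'
specs = [renderapi.tilespec.TileSpec(tileId='t%d' % i,
                                     tforms=[renderapi.transform.AffineModel()])
         for i in range(2)]
trans = [renderapi.transform.AffineModel(B0=10.0 * i, B1=0.0) for i in range(2)]
newspecs = new_specs_with_tf(ref, specs, trans)
print([type(tf).__name__ for tf in newspecs[0].tforms])
# ['ReferenceTransform', 'AffineModel'] -- shared lens ref, then per-tile translation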
b80e11b9b1cfab5c2b80fb3dab8c32f95835be3dc8f60297b7e09337fe270b75 | def solve(A, weights, reg, x0, b, precomputed_ATW=None, precomputed_ATWA=None, precomputed_K_factorized=None):
'regularized weighted solve\n\n Parameters\n ----------\n A : :class:`scipy.sparse.csr`\n the matrix, N (equations) x M (degrees of freedom)\n weights : :class:`scipy.sparse.csr_matrix`\n N x N diagonal matrix containing weights\n reg : :class:`scipy.sparse.csr_matrix`\n M x M diagonal matrix containing regularizations\n x0 : :class:`numpy.ndarray`\n M x nsolve float constraint values for the DOFs\n b : :class:`numpy.ndarray`:\n N x nsolve float right-hand-side(s)\n precomputed_ATW : :class:`scipy.sparse.csc_matrix`\n value to use rather than computing A.T.dot(weights)\n precomputed_ATWA : :class:`scipy.sparse.csc_matrix`\n value to use rather than computing A.T.dot(weights).dot(A)\n precomputed_K_factorized : func\n factorized solve function to use rather than computing\n scipy.sparse.linalg.factorized(A.T.dot(weights).dot(A) + reg)\n\n Returns\n -------\n solution : list of numpy.ndarray\n list of numpy arrays of x and y vertex positions of solution\n errx : numpy.ndarray\n numpy array of x residuals\n erry : numpy.ndarray\n numpy array of y residuals\n\n '
ATW = (A.transpose().dot(weights) if (precomputed_ATW is None) else precomputed_ATW)
if (precomputed_K_factorized is None):
K = ((ATW.dot(A) if (precomputed_ATWA is None) else precomputed_ATWA) + reg)
K_factorized = factorized(K)
else:
K_factorized = precomputed_K_factorized
solution = []
i = 0
for x in x0:
Lm = (reg.dot(x) + ATW.dot(b[(:, i)]))
i += 1
solution.append(K_factorized(Lm))
errx = (A.dot(solution[0]) - b[(:, 0)])
erry = (A.dot(solution[1]) - b[(:, 1)])
return (solution, errx, erry) | regularized weighted solve
Parameters
----------
A : :class:`scipy.sparse.csr`
the matrix, N (equations) x M (degrees of freedom)
weights : :class:`scipy.sparse.csr_matrix`
N x N diagonal matrix containing weights
reg : :class:`scipy.sparse.csr_matrix`
M x M diagonal matrix containing regularizations
x0 : list of :class:`numpy.ndarray`
nsolve arrays, each of length M, of float constraint values for the DOFs
b : :class:`numpy.ndarray`
N x nsolve float right-hand-side(s)
precomputed_ATW : :class:`scipy.sparse.csc_matrix`
value to use rather than computing A.T.dot(weights)
precomputed_ATWA : :class:`scipy.sparse.csc_matrix`
value to use rather than computing A.T.dot(weights).dot(A)
precomputed_K_factorized : func
factorized solve function to use rather than computing
scipy.sparse.linalg.factorized(A.T.dot(weights).dot(A) + reg)
Returns
-------
solution : list of numpy.ndarray
list of numpy arrays of x and y vertex positions of solution
errx : numpy.ndarray
numpy array of x residuals
erry : numpy.ndarray
numpy array of y residuals | em_stitch/lens_correction/mesh_and_solve_transform.py | solve | AllenInstitute/em_stitch | 2 | python | def solve(A, weights, reg, x0, b, precomputed_ATW=None, precomputed_ATWA=None, precomputed_K_factorized=None):
'regularized weighted solve\n\n Parameters\n ----------\n A : :class:`scipy.sparse.csr`\n the matrix, N (equations) x M (degrees of freedom)\n weights : :class:`scipy.sparse.csr_matrix`\n N x N diagonal matrix containing weights\n reg : :class:`scipy.sparse.csr_matrix`\n M x M diagonal matrix containing regularizations\n x0 : :class:`numpy.ndarray`\n M x nsolve float constraint values for the DOFs\n b : :class:`numpy.ndarray`:\n N x nsolve float right-hand-side(s)\n precomputed_ATW : :class:`scipy.sparse.csc_matrix`\n value to use rather than computing A.T.dot(weights)\n precomputed_ATWA : :class:`scipy.sparse.csc_matrix`\n value to use rather than computing A.T.dot(weights).dot(A)\n precomputed_K_factorized : func\n factorized solve function to use rather than computing\n scipy.sparse.linalg.factorized(A.T.dot(weights).dot(A) + reg)\n\n Returns\n -------\n solution : list of numpy.ndarray\n list of numpy arrays of x and y vertex positions of solution\n errx : numpy.ndarray\n numpy array of x residuals\n erry : numpy.ndarray\n numpy array of y residuals\n\n '
ATW = (A.transpose().dot(weights) if (precomputed_ATW is None) else precomputed_ATW)
if (precomputed_K_factorized is None):
K = ((ATW.dot(A) if (precomputed_ATWA is None) else precomputed_ATWA) + reg)
K_factorized = factorized(K)
else:
K_factorized = precomputed_K_factorized
solution = []
i = 0
for x in x0:
Lm = (reg.dot(x) + ATW.dot(b[(:, i)]))
i += 1
solution.append(K_factorized(Lm))
errx = (A.dot(solution[0]) - b[(:, 0)])
erry = (A.dot(solution[1]) - b[(:, 1)])
return (solution, errx, erry) | def solve(A, weights, reg, x0, b, precomputed_ATW=None, precomputed_ATWA=None, precomputed_K_factorized=None):
'regularized weighted solve\n\n Parameters\n ----------\n A : :class:`scipy.sparse.csr`\n the matrix, N (equations) x M (degrees of freedom)\n weights : :class:`scipy.sparse.csr_matrix`\n N x N diagonal matrix containing weights\n reg : :class:`scipy.sparse.csr_matrix`\n M x M diagonal matrix containing regularizations\n x0 : :class:`numpy.ndarray`\n M x nsolve float constraint values for the DOFs\n b : :class:`numpy.ndarray`:\n N x nsolve float right-hand-side(s)\n precomputed_ATW : :class:`scipy.sparse.csc_matrix`\n value to use rather than computing A.T.dot(weights)\n precomputed_ATWA : :class:`scipy.sparse.csc_matrix`\n value to use rather than computing A.T.dot(weights).dot(A)\n precomputed_K_factorized : func\n factorized solve function to use rather than computing\n scipy.sparse.linalg.factorized(A.T.dot(weights).dot(A) + reg)\n\n Returns\n -------\n solution : list of numpy.ndarray\n list of numpy arrays of x and y vertex positions of solution\n errx : numpy.ndarray\n numpy array of x residuals\n erry : numpy.ndarray\n numpy array of y residuals\n\n '
ATW = (A.transpose().dot(weights) if (precomputed_ATW is None) else precomputed_ATW)
if (precomputed_K_factorized is None):
K = ((ATW.dot(A) if (precomputed_ATWA is None) else precomputed_ATWA) + reg)
K_factorized = factorized(K)
else:
K_factorized = precomputed_K_factorized
solution = []
i = 0
for x in x0:
Lm = (reg.dot(x) + ATW.dot(b[(:, i)]))
i += 1
solution.append(K_factorized(Lm))
errx = (A.dot(solution[0]) - b[(:, 0)])
erry = (A.dot(solution[1]) - b[(:, 1)])
return (solution, errx, erry)<|docstring|>regularized weighted solve
Parameters
----------
A : :class:`scipy.sparse.csr`
the matrix, N (equations) x M (degrees of freedom)
weights : :class:`scipy.sparse.csr_matrix`
N x N diagonal matrix containing weights
reg : :class:`scipy.sparse.csr_matrix`
M x M diagonal matrix containing regularizations
x0 : :class:`numpy.ndarray`
M x nsolve float constraint values for the DOFs
b : :class:`numpy.ndarray`:
N x nsolve float right-hand-side(s)
precomputed_ATW : :class:`scipy.sparse.csc_matrix`
value to use rather than computing A.T.dot(weights)
precomputed_ATWA : :class:`scipy.sparse.csc_matrix`
value to use rather than computing A.T.dot(weights).dot(A)
precomputed_K_factorized : func
factorized solve function to use rather than computing
scipy.sparse.linalg.factorized(A.T.dot(weights).dot(A) + reg)
Returns
-------
solution : list of numpy.ndarray
list of numpy arrays of x and y vertex positions of solution
errx : numpy.ndarray
numpy array of x residuals
erry : numpy.ndarray
numpy array of y residuals<|endoftext|> |
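Because the solver is generic regularized weighted least squares, it can be exercised on a tiny synthetic system (sizes arbitrary):
import numpy as np
from scipy import sparse
from em_stitch.lens_correction.mesh_and_solve_transform import solve

A = sparse.csr_matrix(np.random.randn(5, 3))   # 5 equations, 3 DOFs
weights = sparse.eye(5, format='csr')
reg = sparse.eye(3, format='csr') * 1e-8       # nearly unregularized
x0 = [np.zeros(3), np.zeros(3)]                # one start vector per solve (x and y)
b = np.random.randn(5, 2)
solution, errx, erry = solve(A, weights, reg, x0, b)
print(errx.shape, erry.shape)                  # one residual per equation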
31e77a317bcac278b05fc7a98d2a2019711cc737521a77ebcc10124d97011066 | def report_solution(errx, erry, transforms, criteria):
'compile results, statistics, and messages for reporting information\n about lens correction solves\n\n Parameters\n ----------\n errx : numpy.ndarray\n numpy array of x residuals\n erry : numpy.ndarray\n numpy array of y residuals\n transforms : list of renderapi.transform.Transform\n list of transforms considered for the translation parameter\n criteria : dict\n criteria for good solve (deprecated)\n\n Returns\n -------\n translation : numpy.ndarray\n numpy array describing the resultant translations of the solution\n jresult : dict\n solution statistics dictionary\n message : str\n string reporting results\n '
translation = np.array([tf.translation for tf in transforms])
jresult = {}
jresult['x_res_min'] = errx.min()
jresult['x_res_max'] = errx.max()
jresult['x_res_mean'] = errx.mean()
jresult['x_res_std'] = errx.std()
jresult['y_res_min'] = erry.min()
jresult['y_res_max'] = erry.max()
jresult['y_res_mean'] = erry.mean()
jresult['y_res_std'] = erry.std()
for k in jresult.keys():
jresult[k] = np.round(jresult[k], 3)
message = 'lens solver results [px]'
for val in ['res']:
for xy in ['x', 'y']:
d = ('\n%8s' % (((xy + '_') + val) + ' '))
v = ''
for calc in ['min', 'max', 'mean', 'std']:
d += (calc + ',')
k = ((((xy + '_') + val) + '_') + calc)
v += ('%8.2f' % jresult[k])
message += (d + v)
return (translation, jresult, message) | compile results, statistics, and messages for reporting information
about lens correction solves
Parameters
----------
errx : numpy.ndarray
numpy array of x residuals
erry : numpy.ndarray
numpy array of y residuals
transforms : list of renderapi.transform.Transform
list of transforms considered for the translation parameter
criteria : dict
criteria for good solve (deprecated)
Returns
-------
translation : numpy.ndarray
numpy array describing the resultant translations of the solution
jresult : dict
solution statistics dictionary
message : str
string reporting results | em_stitch/lens_correction/mesh_and_solve_transform.py | report_solution | AllenInstitute/em_stitch | 2 | python | def report_solution(errx, erry, transforms, criteria):
'compile results, statistics, and messages for reporting information\n about lens correction solves\n\n Parameters\n ----------\n errx : numpy.ndarray\n numpy array of x residuals\n erry : numpy.ndarray\n numpy array of y residuals\n transforms : list of renderapi.transform.Transform\n list of transforms considered for the translation parameter\n criteria : dict\n criteria for good solve (deprecated)\n\n Returns\n -------\n translation : numpy.ndarray\n numpy array describing the resultant translations of the solution\n jresult : dict\n solution statistics dictionary\n message : str\n string reporting results\n '
translation = np.array([tf.translation for tf in transforms])
jresult = {}
jresult['x_res_min'] = errx.min()
jresult['x_res_max'] = errx.max()
jresult['x_res_mean'] = errx.mean()
jresult['x_res_std'] = errx.std()
jresult['y_res_min'] = erry.min()
jresult['y_res_max'] = erry.max()
jresult['y_res_mean'] = erry.mean()
jresult['y_res_std'] = erry.std()
for k in jresult.keys():
jresult[k] = np.round(jresult[k], 3)
message = 'lens solver results [px]'
for val in ['res']:
for xy in ['x', 'y']:
d = ('\n%8s' % (((xy + '_') + val) + ' '))
v = ''
for calc in ['min', 'max', 'mean', 'std']:
d += (calc + ',')
k = ((((xy + '_') + val) + '_') + calc)
v += ('%8.2f' % jresult[k])
message += (d + v)
return (translation, jresult, message) | def report_solution(errx, erry, transforms, criteria):
'compile results, statistics, and messages for reporting information\n about lens correction solves\n\n Parameters\n ----------\n errx : numpy.ndarray\n numpy array of x residuals\n erry : numpy.ndarray\n numpy array of y residuals\n transforms : list of renderapi.transform.Transform\n list of transforms considered for the translation parameter\n criteria : dict\n criteria for good solve (deprecated)\n\n Returns\n -------\n translation : numpy.ndarray\n numpy array describing the resultant translations of the solution\n jresult : dict\n solution statistics dictionary\n message : str\n string reporting results\n '
translation = np.array([tf.translation for tf in transforms])
jresult = {}
jresult['x_res_min'] = errx.min()
jresult['x_res_max'] = errx.max()
jresult['x_res_mean'] = errx.mean()
jresult['x_res_std'] = errx.std()
jresult['y_res_min'] = erry.min()
jresult['y_res_max'] = erry.max()
jresult['y_res_mean'] = erry.mean()
jresult['y_res_std'] = erry.std()
for k in jresult.keys():
jresult[k] = np.round(jresult[k], 3)
message = 'lens solver results [px]'
for val in ['res']:
for xy in ['x', 'y']:
d = ('\n%8s' % (((xy + '_') + val) + ' '))
v = ''
for calc in ['min', 'max', 'mean', 'std']:
d += (calc + ',')
k = ((((xy + '_') + val) + '_') + calc)
v += ('%8.2f' % jresult[k])
message += (d + v)
return (translation, jresult, message)<|docstring|>compile results, statistics, and messages for reporting information
about lens correction solves
Parameters
----------
errx : numpy.ndarray
numpy array of x residuals
erry : numpy.ndarray
numpy array of y residuals
transforms : list of renderapi.transform.Transform
list of transforms considered for the translation parameter
criteria : dict
criteria for good solve (deprecated)
Returns
-------
translation : numpy.ndarray
numpy array describing the resultant translations of the solution
jresult : dict
solution statistics dictionary
message : str
string reporting results<|endoftext|> |
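Small sketch of the reporting call; criteria is kept in the signature but unused in the body shown above, so an empty dict suffices:
import numpy as np
import renderapi
from em_stitch.lens_correction.mesh_and_solve_transform import report_solution

errx = 0.3 * np.random.randn(100)
erry = 0.3 * np.random.randn(100)
tforms = [renderapi.transform.AffineModel(B0=1.0, B1=2.0)]
translation, jresult, message = report_solution(errx, erry, tforms, criteria={})
print(message)                 # per-axis min/max/mean/std of residuals [px]
print(jresult['x_res_std'])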
def1d304e1cea7309d08d7f03089b99916641959ece472e64bc8a309efcb083a | def create_x0(nrows, tilespecs):
'create initialization array x0\n\n Parameters\n ----------\n nrows : int\n number of rows in array (defined to match A[1])\n tilespecs : list of renderapi.tilespecs.TileSpec\n tilespecs from which initialization is built\n\n Returns\n -------\n x0 : numpy.ndarray\n initialization array x0\n '
ntiles = len(tilespecs)
x0 = []
x0.append(np.zeros(nrows).astype('float64'))
x0.append(np.zeros(nrows).astype('float64'))
x0[0][0:ntiles] = np.zeros(ntiles)
x0[1][0:ntiles] = np.zeros(ntiles)
for i in range(ntiles):
x0[0][i] = tilespecs[i].tforms[0].B0
x0[1][i] = tilespecs[i].tforms[0].B1
return x0 | create initialization array x0
Parameters
----------
nrows : int
number of rows in array (defined to match A.shape[1])
tilespecs : list of renderapi.tilespecs.TileSpec
tilespecs from which initialization is built
Returns
-------
x0 : list of numpy.ndarray
initialization arrays for the x and y solves
'create initialization array x0\n\n Parameters\n ----------\n nrows : int\n number of rows in array (defined to match A[1])\n tilespecs : list of renderapi.tilespecs.TileSpec\n tilespecs from which initialization is built\n\n Returns\n -------\n x0 : numpy.ndarray\n initialization array x0\n '
ntiles = len(tilespecs)
x0 = []
x0.append(np.zeros(nrows).astype('float64'))
x0.append(np.zeros(nrows).astype('float64'))
x0[0][0:ntiles] = np.zeros(ntiles)
x0[1][0:ntiles] = np.zeros(ntiles)
for i in range(ntiles):
x0[0][i] = tilespecs[i].tforms[0].B0
x0[1][i] = tilespecs[i].tforms[0].B1
return x0 | def create_x0(nrows, tilespecs):
'create initialization array x0\n\n Parameters\n ----------\n nrows : int\n number of rows in array (defined to match A[1])\n tilespecs : list of renderapi.tilespecs.TileSpec\n tilespecs from which initialization is built\n\n Returns\n -------\n x0 : numpy.ndarray\n initialization array x0\n '
ntiles = len(tilespecs)
x0 = []
x0.append(np.zeros(nrows).astype('float64'))
x0.append(np.zeros(nrows).astype('float64'))
x0[0][0:ntiles] = np.zeros(ntiles)
x0[1][0:ntiles] = np.zeros(ntiles)
for i in range(ntiles):
x0[0][i] = tilespecs[i].tforms[0].B0
x0[1][i] = tilespecs[i].tforms[0].B1
return x0<|docstring|>create initialization array x0
Parameters
----------
nrows : int
number of rows in array (defined to match A.shape[1])
tilespecs : list of renderapi.tilespecs.TileSpec
tilespecs from which initialization is built
Returns
-------
x0 : list of numpy.ndarray
initialization arrays for the x and y solves<|endoftext|>
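Sketch seeding x0 from minimal stand-in tilespecs; the first argument would normally be A.shape[1] from create_A below:
import renderapi
from em_stitch.lens_correction.mesh_and_solve_transform import create_x0

specs = [renderapi.tilespec.TileSpec(
             tileId='t%d' % i,
             tforms=[renderapi.transform.AffineModel(B0=5.0 * i, B1=0.0)])
         for i in range(3)]
x0 = create_x0(3 + 10, specs)    # 3 tile DOFs plus 10 pretend lens DOFs
print(x0[0][:3], x0[1][:3])      # per-tile starting translations in x and y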
3d442f3e534ca70aa05363cfedb1926e3bad9065eca0a40a16a49a387ec4d11b | def create_A(matches, tilespecs, mesh, **kwargs):
'create A matrix describing translation and lens correction\n\n Parameters\n ----------\n matches : list of dict\n list of match dictionaries in render format\n tilespecs : list of renderapi.tilespecs.TileSpec\n tilespecs to include in solve\n mesh : scipy.spatial.qhull.Delaunay\n mesh of input points as produced by\n em_stitch.lens_Correction.mesh_and_solve_transform._create_mesh\n\n Returns\n -------\n A : :class:`scipy.sparse.csr`\n matrix, N (equations) x M (degrees of freedom)\n wts : :class:`scipy.sparse.csr_matrix`\n N x N diagonal matrix containing weights\n b : :class:`numpy.ndarray`:\n N x nsolve float right-hand-side(s)\n lens_dof_start : int\n start index defined by degrees of freedom used to generate A\n '
dof_per_tile = 1
dof_per_vertex = 1
vertex_per_patch = 3
nnz_per_row = (2 * (dof_per_tile + (vertex_per_patch * dof_per_vertex)))
nrows = sum([len(m['matches']['p'][0]) for m in matches])
nd = (nnz_per_row * nrows)
lens_dof_start = (dof_per_tile * len(tilespecs))
data = np.zeros(nd).astype('float64')
b = np.zeros((nrows, 2)).astype('float64')
indices = np.zeros(nd).astype('int64')
indptr = np.zeros((nrows + 1)).astype('int64')
indptr[1:] = (np.arange(1, (nrows + 1)) * nnz_per_row)
weights = np.ones(nrows).astype('float64')
unique_ids = np.array([t.tileId for t in tilespecs])
offset = 0
rows = 0
for mi in range(len(matches)):
m = matches[mi]
pindex = np.argwhere((unique_ids == m['pId']))
qindex = np.argwhere((unique_ids == m['qId']))
npoint_pairs = len(m['matches']['q'][0])
pcoords = np.transpose(np.vstack((m['matches']['p'][0], m['matches']['p'][1]))).astype('float64')
qcoords = np.transpose(np.vstack((m['matches']['q'][0], m['matches']['q'][1]))).astype('float64')
b[rows:(rows + pcoords.shape[0])] = (qcoords - pcoords)
rows += pcoords.shape[0]
pbary = compute_barycentrics(pcoords, mesh, **kwargs)
qbary = compute_barycentrics(qcoords, mesh, **kwargs)
mstep = ((np.arange(npoint_pairs) * nnz_per_row) + offset)
data[(mstep + 0)] = 1.0
data[(mstep + 1)] = (- 1.0)
data[(mstep + 2)] = pbary[0][(:, 0)]
data[(mstep + 3)] = pbary[0][(:, 1)]
data[(mstep + 4)] = pbary[0][(:, 2)]
data[(mstep + 5)] = (- qbary[0][(:, 0)])
data[(mstep + 6)] = (- qbary[0][(:, 1)])
data[(mstep + 7)] = (- qbary[0][(:, 2)])
indices[(mstep + 0)] = pindex
indices[(mstep + 1)] = qindex
indices[(mstep + 2)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 0)])
indices[(mstep + 3)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 1)])
indices[(mstep + 4)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 2)])
indices[(mstep + 5)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 0)])
indices[(mstep + 6)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 1)])
indices[(mstep + 7)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 2)])
offset += (npoint_pairs * nnz_per_row)
A = csr_matrix((data, indices, indptr), dtype='float64')
wts = sparse.eye(weights.size, format='csr', dtype='float64')
wts.data = weights
return (A, wts, b, lens_dof_start) | create A matrix describing translation and lens correction
Parameters
----------
matches : list of dict
list of match dictionaries in render format
tilespecs : list of renderapi.tilespecs.TileSpec
tilespecs to include in solve
mesh : scipy.spatial.qhull.Delaunay
mesh of input points as produced by
em_stitch.lens_correction.mesh_and_solve_transform._create_mesh
Returns
-------
A : :class:`scipy.sparse.csr`
matrix, N (equations) x M (degrees of freedom)
wts : :class:`scipy.sparse.csr_matrix`
N x N diagonal matrix containing weights
b : :class:`numpy.ndarray`
N x nsolve float right-hand-side(s)
lens_dof_start : int
start index defined by degrees of freedom used to generate A | em_stitch/lens_correction/mesh_and_solve_transform.py | create_A | AllenInstitute/em_stitch | 2 | python | def create_A(matches, tilespecs, mesh, **kwargs):
'create A matrix describing translation and lens correction\n\n Parameters\n ----------\n matches : list of dict\n list of match dictionaries in render format\n tilespecs : list of renderapi.tilespecs.TileSpec\n tilespecs to include in solve\n mesh : scipy.spatial.qhull.Delaunay\n mesh of input points as produced by\n em_stitch.lens_Correction.mesh_and_solve_transform._create_mesh\n\n Returns\n -------\n A : :class:`scipy.sparse.csr`\n matrix, N (equations) x M (degrees of freedom)\n wts : :class:`scipy.sparse.csr_matrix`\n N x N diagonal matrix containing weights\n b : :class:`numpy.ndarray`:\n N x nsolve float right-hand-side(s)\n lens_dof_start : int\n start index defined by degrees of freedom used to generate A\n '
dof_per_tile = 1
dof_per_vertex = 1
vertex_per_patch = 3
nnz_per_row = (2 * (dof_per_tile + (vertex_per_patch * dof_per_vertex)))
nrows = sum([len(m['matches']['p'][0]) for m in matches])
nd = (nnz_per_row * nrows)
lens_dof_start = (dof_per_tile * len(tilespecs))
data = np.zeros(nd).astype('float64')
b = np.zeros((nrows, 2)).astype('float64')
indices = np.zeros(nd).astype('int64')
indptr = np.zeros((nrows + 1)).astype('int64')
indptr[1:] = (np.arange(1, (nrows + 1)) * nnz_per_row)
weights = np.ones(nrows).astype('float64')
unique_ids = np.array([t.tileId for t in tilespecs])
offset = 0
rows = 0
for mi in range(len(matches)):
m = matches[mi]
pindex = np.argwhere((unique_ids == m['pId']))
qindex = np.argwhere((unique_ids == m['qId']))
npoint_pairs = len(m['matches']['q'][0])
pcoords = np.transpose(np.vstack((m['matches']['p'][0], m['matches']['p'][1]))).astype('float64')
qcoords = np.transpose(np.vstack((m['matches']['q'][0], m['matches']['q'][1]))).astype('float64')
b[rows:(rows + pcoords.shape[0])] = (qcoords - pcoords)
rows += pcoords.shape[0]
pbary = compute_barycentrics(pcoords, mesh, **kwargs)
qbary = compute_barycentrics(qcoords, mesh, **kwargs)
mstep = ((np.arange(npoint_pairs) * nnz_per_row) + offset)
data[(mstep + 0)] = 1.0
data[(mstep + 1)] = (- 1.0)
data[(mstep + 2)] = pbary[0][(:, 0)]
data[(mstep + 3)] = pbary[0][(:, 1)]
data[(mstep + 4)] = pbary[0][(:, 2)]
data[(mstep + 5)] = (- qbary[0][(:, 0)])
data[(mstep + 6)] = (- qbary[0][(:, 1)])
data[(mstep + 7)] = (- qbary[0][(:, 2)])
indices[(mstep + 0)] = pindex
indices[(mstep + 1)] = qindex
indices[(mstep + 2)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 0)])
indices[(mstep + 3)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 1)])
indices[(mstep + 4)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 2)])
indices[(mstep + 5)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 0)])
indices[(mstep + 6)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 1)])
indices[(mstep + 7)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 2)])
offset += (npoint_pairs * nnz_per_row)
A = csr_matrix((data, indices, indptr), dtype='float64')
wts = sparse.eye(weights.size, format='csr', dtype='float64')
wts.data = weights
return (A, wts, b, lens_dof_start) | def create_A(matches, tilespecs, mesh, **kwargs):
'create A matrix describing translation and lens correction\n\n Parameters\n ----------\n matches : list of dict\n list of match dictionaries in render format\n tilespecs : list of renderapi.tilespecs.TileSpec\n tilespecs to include in solve\n mesh : scipy.spatial.qhull.Delaunay\n mesh of input points as produced by\n em_stitch.lens_Correction.mesh_and_solve_transform._create_mesh\n\n Returns\n -------\n A : :class:`scipy.sparse.csr`\n matrix, N (equations) x M (degrees of freedom)\n wts : :class:`scipy.sparse.csr_matrix`\n N x N diagonal matrix containing weights\n b : :class:`numpy.ndarray`:\n N x nsolve float right-hand-side(s)\n lens_dof_start : int\n start index defined by degrees of freedom used to generate A\n '
dof_per_tile = 1
dof_per_vertex = 1
vertex_per_patch = 3
nnz_per_row = (2 * (dof_per_tile + (vertex_per_patch * dof_per_vertex)))
nrows = sum([len(m['matches']['p'][0]) for m in matches])
nd = (nnz_per_row * nrows)
lens_dof_start = (dof_per_tile * len(tilespecs))
data = np.zeros(nd).astype('float64')
b = np.zeros((nrows, 2)).astype('float64')
indices = np.zeros(nd).astype('int64')
indptr = np.zeros((nrows + 1)).astype('int64')
indptr[1:] = (np.arange(1, (nrows + 1)) * nnz_per_row)
weights = np.ones(nrows).astype('float64')
unique_ids = np.array([t.tileId for t in tilespecs])
offset = 0
rows = 0
for mi in range(len(matches)):
m = matches[mi]
pindex = np.argwhere((unique_ids == m['pId']))
qindex = np.argwhere((unique_ids == m['qId']))
npoint_pairs = len(m['matches']['q'][0])
pcoords = np.transpose(np.vstack((m['matches']['p'][0], m['matches']['p'][1]))).astype('float64')
qcoords = np.transpose(np.vstack((m['matches']['q'][0], m['matches']['q'][1]))).astype('float64')
b[rows:(rows + pcoords.shape[0])] = (qcoords - pcoords)
rows += pcoords.shape[0]
pbary = compute_barycentrics(pcoords, mesh, **kwargs)
qbary = compute_barycentrics(qcoords, mesh, **kwargs)
mstep = ((np.arange(npoint_pairs) * nnz_per_row) + offset)
data[(mstep + 0)] = 1.0
data[(mstep + 1)] = (- 1.0)
data[(mstep + 2)] = pbary[0][(:, 0)]
data[(mstep + 3)] = pbary[0][(:, 1)]
data[(mstep + 4)] = pbary[0][(:, 2)]
data[(mstep + 5)] = (- qbary[0][(:, 0)])
data[(mstep + 6)] = (- qbary[0][(:, 1)])
data[(mstep + 7)] = (- qbary[0][(:, 2)])
indices[(mstep + 0)] = pindex
indices[(mstep + 1)] = qindex
indices[(mstep + 2)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 0)])
indices[(mstep + 3)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 1)])
indices[(mstep + 4)] = (lens_dof_start + mesh.simplices[pbary[1][:]][(:, 2)])
indices[(mstep + 5)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 0)])
indices[(mstep + 6)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 1)])
indices[(mstep + 7)] = (lens_dof_start + mesh.simplices[qbary[1][:]][(:, 2)])
offset += (npoint_pairs * nnz_per_row)
A = csr_matrix((data, indices, indptr), dtype='float64')
wts = sparse.eye(weights.size, format='csr', dtype='float64')
wts.data = weights
return (A, wts, b, lens_dof_start)<|docstring|>create A matrix describing translation and lens correction
Parameters
----------
matches : list of dict
list of match dictionaries in render format
tilespecs : list of renderapi.tilespecs.TileSpec
tilespecs to include in solve
mesh : scipy.spatial.qhull.Delaunay
mesh of input points as produced by
em_stitch.lens_correction.mesh_and_solve_transform._create_mesh
Returns
-------
A : :class:`scipy.sparse.csr`
matrix, N (equations) x M (degrees of freedom)
wts : :class:`scipy.sparse.csr_matrix`
N x N diagonal matrix containing weights
b : :class:`numpy.ndarray`
N x nsolve float right-hand-side(s)
lens_dof_start : int
start index defined by degrees of freedom used to generate A<|endoftext|> |
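Synthetic end-to-end sketch of the inputs create_A expects; the match dict follows the render pointmatch layout, and the mesh corners guarantee every match point lands inside the triangulation:
import numpy as np
import renderapi
from scipy.spatial import Delaunay
from em_stitch.lens_correction.mesh_and_solve_transform import create_A

specs = [renderapi.tilespec.TileSpec(tileId=t, tforms=[renderapi.transform.AffineModel()])
         for t in ('p0', 'q0')]
match = {'pId': 'p0', 'qId': 'q0',
         'matches': {'p': (np.random.rand(2, 20) * 100).tolist(),
                     'q': (np.random.rand(2, 20) * 100).tolist(),
                     'w': [1.0] * 20}}
corners = np.array([[0., 0.], [100., 0.], [0., 100.], [100., 100.]])
mesh = Delaunay(np.vstack([corners, np.random.rand(36, 2) * 100]))
A, wts, b, lens_dof_start = create_A([match], specs, mesh)
print(A.shape, lens_dof_start)   # 20 equation rows; 2 tile DOFs before the lens DOFs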
2c0d319c088da1aa8def8b7c72751f073357c2d59007af5a09280fade672966b | def create_transforms(ntiles, solution):
'create translation transformations from a solution array\n\n Parameters\n ----------\n ntiles : int\n number of tiles represented in the solution\n solution : list of numpy.ndarray\n list of numpy arrays of x and y positions result of solve\n\n Returns\n -------\n rtransforms : list of renderapi.transform.AffineModel\n transforms described by solution\n '
rtransforms = []
for i in range(ntiles):
rtransforms.append(renderapi.transform.AffineModel(B0=solution[0][i], B1=solution[1][i]))
return rtransforms | create translation transformations from a solution array
Parameters
----------
ntiles : int
number of tiles represented in the solution
solution : list of numpy.ndarray
list of numpy arrays of x and y positions result of solve
Returns
-------
rtransforms : list of renderapi.transform.AffineModel
transforms described by solution | em_stitch/lens_correction/mesh_and_solve_transform.py | create_transforms | AllenInstitute/em_stitch | 2 | python | def create_transforms(ntiles, solution):
'create translation transformations from a solution array\n\n Parameters\n ----------\n ntiles : int\n number of tiles represented in the solution\n solution : list of numpy.ndarray\n list of numpy arrays of x and y positions result of solve\n\n Returns\n -------\n rtransforms : list of renderapi.transform.AffineModel\n transforms described by solution\n '
rtransforms = []
for i in range(ntiles):
rtransforms.append(renderapi.transform.AffineModel(B0=solution[0][i], B1=solution[1][i]))
return rtransforms | def create_transforms(ntiles, solution):
'create translation transformations from a solution array\n\n Parameters\n ----------\n ntiles : int\n number of tiles represented in the solution\n solution : list of numpy.ndarray\n list of numpy arrays of x and y positions result of solve\n\n Returns\n -------\n rtransforms : list of renderapi.transform.AffineModel\n transforms described by solution\n '
rtransforms = []
for i in range(ntiles):
rtransforms.append(renderapi.transform.AffineModel(B0=solution[0][i], B1=solution[1][i]))
return rtransforms<|docstring|>create translation transformations from a solution array
Parameters
----------
ntiles : int
number of tiles represented in the solution
solution : list of numpy.ndarray
list of numpy arrays of x and y positions result of solve
Returns
-------
rtransforms : list of renderapi.transform.AffineModel
transforms described by solution<|endoftext|> |
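The solution layout keeps one translation per tile at the head of each vector, so the conversion is easy to verify by hand:
import numpy as np
from em_stitch.lens_correction.mesh_and_solve_transform import create_transforms

solution = [np.array([1.5, -2.0, 0.25]),   # per-tile x translations
            np.array([0.0, 3.0, -1.0])]    # per-tile y translations
tfs = create_transforms(3, solution)
print(tfs[0].B0, tfs[0].B1)                # 1.5 0.0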
fd99f510df909d7bb3e8f53b6ff3bc6c2fe933465ba977fb5254ee73222c7ed4 | def estimate_stage_affine(t0, t1):
'estimate affine transformation between translation components\n of tiles in t0 and tiles in t1 to give overall stage affine transformation.\n\n Parameters\n ----------\n t0 : list of renderapi.tilespec.TileSpec\n source tilespecs (initial position)\n t1 : list of renderapi.tilespec.TileSpec\n destination tilespecs (post-solve)\n \n Returns\n -------\n aff : renderapi.transform.AffineModel\n 2D affine transform representing stage transformation\n '
src = np.array([t.tforms[0].translation for t in t0])
dst = np.array([t.tforms[1].translation for t in t1])
aff = renderapi.transform.AffineModel()
aff.estimate(src, dst)
return aff | estimate affine transformation between translation components
of tiles in t0 and tiles in t1 to give overall stage affine transformation.
Parameters
----------
t0 : list of renderapi.tilespec.TileSpec
source tilespecs (initial position)
t1 : list of renderapi.tilespec.TileSpec
destination tilespecs (post-solve)
Returns
-------
aff : renderapi.transform.AffineModel
2D affine transform representing stage transformation | em_stitch/lens_correction/mesh_and_solve_transform.py | estimate_stage_affine | AllenInstitute/em_stitch | 2 | python | def estimate_stage_affine(t0, t1):
'estimate affine transformation between translation components\n of tiles in t0 and tiles in t1 to give overall stage affine transformation.\n\n Parameters\n ----------\n t0 : list of renderapi.tilespec.TileSpec\n source tilespecs (initial position)\n t1 : list of renderapi.tilespec.TileSpec\n destination tilespecs (post-solve)\n \n Returns\n -------\n aff : renderapi.transform.AffineModel\n 2D affine transform representing stage transformation\n '
src = np.array([t.tforms[0].translation for t in t0])
dst = np.array([t.tforms[1].translation for t in t1])
aff = renderapi.transform.AffineModel()
aff.estimate(src, dst)
return aff | def estimate_stage_affine(t0, t1):
'estimate affine transformation between translation components\n of tiles in t0 and tiles in t1 to give overall stage affine transformation.\n\n Parameters\n ----------\n t0 : list of renderapi.tilespec.TileSpec\n source tilespecs (initial position)\n t1 : list of renderapi.tilespec.TileSpec\n destination tilespecs (post-solve)\n \n Returns\n -------\n aff : renderapi.transform.AffineModel\n 2D affine transform representing stage transformation\n '
src = np.array([t.tforms[0].translation for t in t0])
dst = np.array([t.tforms[1].translation for t in t1])
aff = renderapi.transform.AffineModel()
aff.estimate(src, dst)
return aff<|docstring|>estimate affine transformation between translation components
of tiles in t0 and tiles in t1 to give overall stage affine transformation.
Parameters
----------
t0 : list of renderapi.tilespec.TileSpec
source tilespecs (initial position)
t1 : list of renderapi.tilespec.TileSpec
destination tilespecs (post-solve)
Returns
-------
aff : renderapi.transform.AffineModel
2D affine transform representing stage transformation<|endoftext|> |
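Toy check with four tiles on a grid uniformly shrunk by 2 percent after the solve; the extra leading transform in t1 mimics the inserted lens reference:
import renderapi
from em_stitch.lens_correction.mesh_and_solve_transform import estimate_stage_affine

positions = [(0.0, 0.0), (100.0, 0.0), (0.0, 100.0), (100.0, 100.0)]
t0 = [renderapi.tilespec.TileSpec(tileId='t%d' % i,
          tforms=[renderapi.transform.AffineModel(B0=x, B1=y)])
      for i, (x, y) in enumerate(positions)]
t1 = [renderapi.tilespec.TileSpec(tileId='t%d' % i,
          tforms=[renderapi.transform.AffineModel(),               # lens-reference slot
                  renderapi.transform.AffineModel(B0=0.98 * x, B1=0.98 * y)])
      for i, (x, y) in enumerate(positions)]
aff = estimate_stage_affine(t0, t1)
print(aff.scale)    # approximately (0.98, 0.98)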
ad2cfe0b351e4f390252b2624e1cb07e038a15fed94496aa8f5acf91027e01ba | def _create_mesh(resolvedtiles, matches, nvertex, return_area_triangle_par=False, **kwargs):
'create mesh with a given number of vertices based on example tiles\n and pointmatches\n\n Parameters\n ----------\n resolvedtiles : renderapi.resolvedtiles.ResolvedTiles\n resolvedtiles containing a tilespec with mask, width, and height\n properties to use as a template for the mesh\n matches : list of dict\n list of point correspondences in render pointmatch format\n nvertex : int\n number of vertices for mesh\n return_area_triangle_par : boolean\n whether to return the area parameter used to generate the\n triangular mesh\n\n Returns\n -------\n mesh : scipy.spatial.qhull.Delaunay\n triangular mesh\n area_triangle_par : float\n max area constraint used in generating mesh\n '
remove_weighted_matches(matches, weight=0.0)
tilespecs = resolvedtiles.tilespecs
example_tspec = tilespecs[0]
tile_width = example_tspec.width
tile_height = example_tspec.height
maskUrl = example_tspec.ip[0].maskUrl
coords = condense_coords(matches)
nc0 = coords.shape[0]
coords = smooth_density(coords, tile_width, tile_height, 10, **kwargs)
nc1 = coords.shape[0]
logger.info(('\n smoothing point density reduced points from %d to %d' % (nc0, nc1)))
if (coords.shape[0] == 0):
raise MeshLensCorrectionException('no point matches left after smoothing density, probably some sparse areas of matching')
bbox = create_PSLG(tile_width, tile_height, maskUrl)
(mesh, area_triangle_par) = find_delaunay_with_max_vertices(bbox, nvertex)
(mesh, area_triangle_par) = force_vertices_with_npoints(area_triangle_par, bbox, coords, 3, **kwargs)
return ((mesh, area_triangle_par) if return_area_triangle_par else mesh) | create mesh with a given number of vertices based on example tiles
and pointmatches
Parameters
----------
resolvedtiles : renderapi.resolvedtiles.ResolvedTiles
resolvedtiles containing a tilespec with mask, width, and height
properties to use as a template for the mesh
matches : list of dict
list of point correspondences in render pointmatch format
nvertex : int
number of vertices for mesh
return_area_triangle_par : boolean
whether to return the area parameter used to generate the
triangular mesh
Returns
-------
mesh : scipy.spatial.qhull.Delaunay
triangular mesh
area_triangle_par : float
max area constraint used in generating mesh | em_stitch/lens_correction/mesh_and_solve_transform.py | _create_mesh | AllenInstitute/em_stitch | 2 | python | def _create_mesh(resolvedtiles, matches, nvertex, return_area_triangle_par=False, **kwargs):
'create mesh with a given number of vertices based on example tiles\n and pointmatches\n\n Parameters\n ----------\n resolvedtiles : renderapi.resolvedtiles.ResolvedTiles\n resolvedtiles containing a tilespec with mask, width, and height\n properties to use as a template for the mesh\n matches : list of dict\n list of point correspondences in render pointmatch format\n nvertex : int\n number of vertices for mesh\n return_area_triangle_par : boolean\n whether to return the area parameter used to generate the\n triangular mesh\n\n Returns\n -------\n mesh : scipy.spatial.qhull.Delaunay\n triangular mesh\n area_triangle_par : float\n max area constraint used in generating mesh\n '
remove_weighted_matches(matches, weight=0.0)
tilespecs = resolvedtiles.tilespecs
example_tspec = tilespecs[0]
tile_width = example_tspec.width
tile_height = example_tspec.height
maskUrl = example_tspec.ip[0].maskUrl
coords = condense_coords(matches)
nc0 = coords.shape[0]
coords = smooth_density(coords, tile_width, tile_height, 10, **kwargs)
nc1 = coords.shape[0]
logger.info(('\n smoothing point density reduced points from %d to %d' % (nc0, nc1)))
if (coords.shape[0] == 0):
raise MeshLensCorrectionException('no point matches left after smoothing density, probably some sparse areas of matching')
bbox = create_PSLG(tile_width, tile_height, maskUrl)
(mesh, area_triangle_par) = find_delaunay_with_max_vertices(bbox, nvertex)
(mesh, area_triangle_par) = force_vertices_with_npoints(area_triangle_par, bbox, coords, 3, **kwargs)
return ((mesh, area_triangle_par) if return_area_triangle_par else mesh) | def _create_mesh(resolvedtiles, matches, nvertex, return_area_triangle_par=False, **kwargs):
'create mesh with a given number of vertices based on example tiles\n and pointmatches\n\n Parameters\n ----------\n resolvedtiles : renderapi.resolvedtiles.ResolvedTiles\n resolvedtiles containing a tilespec with mask, width, and height\n properties to use as a template for the mesh\n matches : list of dict\n list of point correspondences in render pointmatch format\n nvertex : int\n number of vertices for mesh\n return_area_triangle_par : boolean\n whether to return the area parameter used to generate the\n triangular mesh\n\n Returns\n -------\n mesh : scipy.spatial.qhull.Delaunay\n triangular mesh\n area_triangle_par : float\n max area constraint used in generating mesh\n '
remove_weighted_matches(matches, weight=0.0)
tilespecs = resolvedtiles.tilespecs
example_tspec = tilespecs[0]
tile_width = example_tspec.width
tile_height = example_tspec.height
maskUrl = example_tspec.ip[0].maskUrl
coords = condense_coords(matches)
nc0 = coords.shape[0]
coords = smooth_density(coords, tile_width, tile_height, 10, **kwargs)
nc1 = coords.shape[0]
logger.info(('\n smoothing point density reduced points from %d to %d' % (nc0, nc1)))
if (coords.shape[0] == 0):
raise MeshLensCorrectionException('no point matches left after smoothing density, probably some sparse areas of matching')
bbox = create_PSLG(tile_width, tile_height, maskUrl)
(mesh, area_triangle_par) = find_delaunay_with_max_vertices(bbox, nvertex)
(mesh, area_triangle_par) = force_vertices_with_npoints(area_triangle_par, bbox, coords, 3, **kwargs)
return ((mesh, area_triangle_par) if return_area_triangle_par else mesh)<|docstring|>create mesh with a given number of vertices based on example tiles
and pointmatches
Parameters
----------
resolvedtiles : renderapi.resolvedtiles.ResolvedTiles
resolvedtiles containing a tilespec with mask, width, and height
properties to use as a template for the mesh
matches : list of dict
list of point correspondences in render pointmatch format
nvertex : int
number of vertices for mesh
return_area_triangle_par : boolean
whether to return the area parameter used to generate the
triangular mesh
Returns
-------
mesh : scipy.spatial.qhull.Delaunay
triangular mesh
area_triangle_par : float
max area constraint used in generating mesh<|endoftext|> |
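Hedged sketch of driving this from serialized montage output; the file names are hypothetical placeholders for whatever your pipeline writes:
import json
import renderapi
from em_stitch.lens_correction.mesh_and_solve_transform import _create_mesh

with open('resolvedtiles.json') as f:     # hypothetical path
    resolved = renderapi.resolvedtiles.ResolvedTiles(json=json.load(f))
with open('matches.json') as f:           # hypothetical path
    matches = json.load(f)
mesh, area_par = _create_mesh(resolved, matches, nvertex=1000,
                              return_area_triangle_par=True)
print(mesh.points.shape[0], area_par)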
0ab49d4964e62cd5f8fbf2bad144027defa941e667eb284243c27062e5688c44 | def _solve_resolvedtiles(resolvedtiles, matches, nvertex, regularization_lambda, regularization_translation_factor, regularization_lens_lambda, good_solve_dict, logger=default_logger, **kwargs):
'generate lens correction from resolvedtiles and pointmatches\n\n Parameters\n ----------\n resolvedtiles : renderapi.resolvedtiles.ResolvedTiles\n resolvedtiles object on which transformation will be computed\n matches : list of dict\n point correspondences to consider in render pointmatch format\n nvertex :\n number of vertices in mesh\n regularization_lambda : float\n lambda value for affine regularization\n regularization_translation_factor : float\n translation factor of regularization\n regularization_lens_lambda : float\n lambda value for lens regularization\n good_solve_dict :\n dictionary to define when a solve fails\n logger : logging.Logger\n logger to use in reporting\n Returns\n -------\n resolved : renderapi.resolvedtiles.ResolvedTiles\n new resolvedtiles object with derived lens correction applied\n new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform\n derived lens correction transform\n jresult : dict\n dictionary of solve information\n '
tilespecs = resolvedtiles.tilespecs
example_tspec = tilespecs[0]
mesh = _create_mesh(resolvedtiles, matches, nvertex, **kwargs)
nend = mesh.points.shape[0]
logger.info(('\n aimed for %d mesh points, got %d' % (nvertex, nend)))
if (mesh.points.shape[0] < (0.5 * nvertex)):
raise MeshLensCorrectionException('mesh coarser than intended')
(A, weights, b, lens_dof_start) = create_A(matches, tilespecs, mesh)
x0 = create_x0(A.shape[1], tilespecs)
reg = create_regularization(A.shape[1], len(tilespecs), regularization_lambda, regularization_translation_factor, regularization_lens_lambda)
(solution, errx, erry) = solve(A, weights, reg, x0, b)
transforms = create_transforms(len(tilespecs), solution)
(tf_trans, jresult, solve_message) = report_solution(errx, erry, transforms, good_solve_dict)
logger.info(solve_message)
if (not all([(errx.mean() < good_solve_dict['error_mean']), (erry.mean() < good_solve_dict['error_mean']), (errx.std() < good_solve_dict['error_std']), (erry.std() < good_solve_dict['error_std'])])):
raise MeshLensCorrectionException(('Solve not good: %s' % solve_message))
logger.debug(solve_message)
new_ref_transform = create_thinplatespline_tf(mesh, solution, lens_dof_start, logger)
bbox = example_tspec.bbox_transformed(tf_limit=0)
tbbox = new_ref_transform.tform(bbox)
bstr = 'new transform corners:\n'
for i in range((bbox.shape[0] - 1)):
bstr += (' (%0.1f, %0.1f) -> (%0.1f, %0.1f)\n' % (bbox[(i, 0)], bbox[(i, 1)], tbbox[(i, 0)], tbbox[(i, 1)]))
logger.info(bstr)
new_tilespecs = new_specs_with_tf(new_ref_transform, tilespecs, transforms)
stage_affine = estimate_stage_affine(tilespecs, new_tilespecs)
sastr = (((('affine estimate of tile translations:\n' + ' scale: {}\n'.format(stage_affine.scale)) + ' translation: {}\n'.format(stage_affine.translation)) + ' shear: {}\n'.format(stage_affine.shear)) + ' rotation: {}\n'.format(np.degrees(stage_affine.rotation)))
logger.info(sastr)
resolved = renderapi.resolvedtiles.ResolvedTiles(tilespecs=new_tilespecs, transformList=[new_ref_transform])
return (resolved, new_ref_transform, jresult) | generate lens correction from resolvedtiles and pointmatches
Parameters
----------
resolvedtiles : renderapi.resolvedtiles.ResolvedTiles
resolvedtiles object on which transformation will be computed
matches : list of dict
point correspondences to consider in render pointmatch format
nvertex : int
number of vertices in mesh
regularization_lambda : float
lambda value for affine regularization
regularization_translation_factor : float
translation factor of regularization
regularization_lens_lambda : float
lambda value for lens regularization
good_solve_dict : dict
dictionary to define when a solve fails
logger : logging.Logger
logger to use in reporting
Returns
-------
resolved : renderapi.resolvedtiles.ResolvedTiles
new resolvedtiles object with derived lens correction applied
new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform
derived lens correction transform
jresult : dict
dictionary of solve information | em_stitch/lens_correction/mesh_and_solve_transform.py | _solve_resolvedtiles | AllenInstitute/em_stitch | 2 | python | def _solve_resolvedtiles(resolvedtiles, matches, nvertex, regularization_lambda, regularization_translation_factor, regularization_lens_lambda, good_solve_dict, logger=default_logger, **kwargs):
'generate lens correction from resolvedtiles and pointmatches\n\n Parameters\n ----------\n resolvedtiles : renderapi.resolvedtiles.ResolvedTiles\n resolvedtiles object on which transformation will be computed\n matches : list of dict\n point correspondences to consider in render pointmatch format\n nvertex :\n number of vertices in mesh\n regularization_lambda : float\n lambda value for affine regularization\n regularization_translation_factor : float\n translation factor of regularization\n regularization_lens_lambda : float\n lambda value for lens regularization\n good_solve_dict :\n dictionary to define when a solve fails\n logger : logging.Logger\n logger to use in reporting\n Returns\n -------\n resolved : renderapi.resolvedtiles.ResolvedTiles\n new resolvedtiles object with derived lens correction applied\n new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform\n derived lens correction transform\n jresult : dict\n dictionary of solve information\n '
tilespecs = resolvedtiles.tilespecs
example_tspec = tilespecs[0]
mesh = _create_mesh(resolvedtiles, matches, nvertex, **kwargs)
nend = mesh.points.shape[0]
logger.info(('\n aimed for %d mesh points, got %d' % (nvertex, nend)))
if (mesh.points.shape[0] < (0.5 * nvertex)):
raise MeshLensCorrectionException('mesh coarser than intended')
(A, weights, b, lens_dof_start) = create_A(matches, tilespecs, mesh)
x0 = create_x0(A.shape[1], tilespecs)
reg = create_regularization(A.shape[1], len(tilespecs), regularization_lambda, regularization_translation_factor, regularization_lens_lambda)
(solution, errx, erry) = solve(A, weights, reg, x0, b)
transforms = create_transforms(len(tilespecs), solution)
(tf_trans, jresult, solve_message) = report_solution(errx, erry, transforms, good_solve_dict)
logger.info(solve_message)
if (not all([(errx.mean() < good_solve_dict['error_mean']), (erry.mean() < good_solve_dict['error_mean']), (errx.std() < good_solve_dict['error_std']), (erry.std() < good_solve_dict['error_std'])])):
raise MeshLensCorrectionException(('Solve not good: %s' % solve_message))
logger.debug(solve_message)
new_ref_transform = create_thinplatespline_tf(mesh, solution, lens_dof_start, logger)
bbox = example_tspec.bbox_transformed(tf_limit=0)
tbbox = new_ref_transform.tform(bbox)
bstr = 'new transform corners:\n'
for i in range((bbox.shape[0] - 1)):
bstr += (' (%0.1f, %0.1f) -> (%0.1f, %0.1f)\n' % (bbox[(i, 0)], bbox[(i, 1)], tbbox[(i, 0)], tbbox[(i, 1)]))
logger.info(bstr)
new_tilespecs = new_specs_with_tf(new_ref_transform, tilespecs, transforms)
stage_affine = estimate_stage_affine(tilespecs, new_tilespecs)
sastr = (((('affine estimate of tile translations:\n' + ' scale: {}\n'.format(stage_affine.scale)) + ' translation: {}\n'.format(stage_affine.translation)) + ' shear: {}\n'.format(stage_affine.shear)) + ' rotation: {}\n'.format(np.degrees(stage_affine.rotation)))
logger.info(sastr)
resolved = renderapi.resolvedtiles.ResolvedTiles(tilespecs=new_tilespecs, transformList=[new_ref_transform])
return (resolved, new_ref_transform, jresult) | def _solve_resolvedtiles(resolvedtiles, matches, nvertex, regularization_lambda, regularization_translation_factor, regularization_lens_lambda, good_solve_dict, logger=default_logger, **kwargs):
'generate lens correction from resolvedtiles and pointmatches\n\n Parameters\n ----------\n resolvedtiles : renderapi.resolvedtiles.ResolvedTiles\n resolvedtiles object on which transformation will be computed\n matches : list of dict\n point correspondences to consider in render pointmatch format\n nvertex :\n number of vertices in mesh\n regularization_lambda : float\n lambda value for affine regularization\n regularization_translation_factor : float\n translation factor of regularization\n regularization_lens_lambda : float\n lambda value for lens regularization\n good_solve_dict :\n dictionary to define when a solve fails\n logger : logging.Logger\n logger to use in reporting\n Returns\n -------\n resolved : renderapi.resolvedtiles.ResolvedTiles\n new resolvedtiles object with derived lens correction applied\n new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform\n derived lens correction transform\n jresult : dict\n dictionary of solve information\n '
tilespecs = resolvedtiles.tilespecs
example_tspec = tilespecs[0]
mesh = _create_mesh(resolvedtiles, matches, nvertex, **kwargs)
nend = mesh.points.shape[0]
logger.info(('\n aimed for %d mesh points, got %d' % (nvertex, nend)))
if (mesh.points.shape[0] < (0.5 * nvertex)):
raise MeshLensCorrectionException('mesh coarser than intended')
(A, weights, b, lens_dof_start) = create_A(matches, tilespecs, mesh)
x0 = create_x0(A.shape[1], tilespecs)
reg = create_regularization(A.shape[1], len(tilespecs), regularization_lambda, regularization_translation_factor, regularization_lens_lambda)
(solution, errx, erry) = solve(A, weights, reg, x0, b)
transforms = create_transforms(len(tilespecs), solution)
(tf_trans, jresult, solve_message) = report_solution(errx, erry, transforms, good_solve_dict)
logger.info(solve_message)
if (not all([(errx.mean() < good_solve_dict['error_mean']), (erry.mean() < good_solve_dict['error_mean']), (errx.std() < good_solve_dict['error_std']), (erry.std() < good_solve_dict['error_std'])])):
raise MeshLensCorrectionException(('Solve not good: %s' % solve_message))
logger.debug(solve_message)
new_ref_transform = create_thinplatespline_tf(mesh, solution, lens_dof_start, logger)
bbox = example_tspec.bbox_transformed(tf_limit=0)
tbbox = new_ref_transform.tform(bbox)
bstr = 'new transform corners:\n'
for i in range((bbox.shape[0] - 1)):
bstr += (' (%0.1f, %0.1f) -> (%0.1f, %0.1f)\n' % (bbox[(i, 0)], bbox[(i, 1)], tbbox[(i, 0)], tbbox[(i, 1)]))
logger.info(bstr)
new_tilespecs = new_specs_with_tf(new_ref_transform, tilespecs, transforms)
stage_affine = estimate_stage_affine(tilespecs, new_tilespecs)
sastr = (((('affine estimate of tile translations:\n' + ' scale: {}\n'.format(stage_affine.scale)) + ' translation: {}\n'.format(stage_affine.translation)) + ' shear: {}\n'.format(stage_affine.shear)) + ' rotation: {}\n'.format(np.degrees(stage_affine.rotation)))
logger.info(sastr)
resolved = renderapi.resolvedtiles.ResolvedTiles(tilespecs=new_tilespecs, transformList=[new_ref_transform])
return (resolved, new_ref_transform, jresult)<|docstring|>generate lens correction from resolvedtiles and pointmatches
Parameters
----------
resolvedtiles : renderapi.resolvedtiles.ResolvedTiles
resolvedtiles object on which transformation will be computed
matches : list of dict
point correspondences to consider in render pointmatch format
nvertex :
number of vertices in mesh
regularization_lambda : float
lambda value for affine regularization
regularization_translation_factor : float
translation factor of regularization
regularization_lens_lambda : float
lambda value for lens regularization
good_solve_dict :
dictionary to define when a solve fails
logger : logging.Logger
logger to use in reporting
Returns
-------
resolved : renderapi.resolvedtiles.ResolvedTiles
new resolvedtiles object with derived lens correction applied
new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform
derived lens correction transform
jresult : dict
dictionary of solve information<|endoftext|> |
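A minimal invocation sketch for _solve_resolvedtiles, assuming resolvedtiles and matches were loaded beforehand; the good_solve keys (error_mean, error_std) are the ones the function checks, but every numeric value below is illustrative only.

good_solve = {'error_mean': 0.2, 'error_std': 2.0}  # thresholds compared against errx/erry stats
resolved, lens_tf, info = _solve_resolvedtiles(
    resolvedtiles,                        # renderapi.resolvedtiles.ResolvedTiles
    matches,                              # render pointmatch dicts
    nvertex=1000,
    regularization_lambda=1e-3,
    regularization_translation_factor=1e-5,
    regularization_lens_lambda=1e-3,
    good_solve_dict=good_solve)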
bcdac2e334b94440749a2980ddb86492bc73f85c59f722d3ec6497b3cdcaa71c | def solve_resolvedtiles_from_args(self):
'use arguments to run lens correction\n\n Returns\n -------\n resolved : renderapi.resolvedtiles.ResolvedTiles\n new resolvedtiles object with derived lens correction applied\n new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform\n derived lens correction transform\n jresult : dict\n dictionary of solve information\n '
if ('tilespecs' in self.args):
jspecs = self.args['tilespecs']
else:
jspecs = jsongz.load(self.args['tilespec_file'])
self.tilespecs = np.array([renderapi.tilespec.TileSpec(json=j) for j in jspecs])
if ('matches' in self.args):
self.matches = self.args['matches']
else:
self.matches = jsongz.load(self.args['match_file'])
return _solve_resolvedtiles(renderapi.resolvedtiles.ResolvedTiles(tilespecs=self.tilespecs, transformList=[]), self.matches, self.args['nvertex'], self.args['regularization']['default_lambda'], self.args['regularization']['translation_factor'], self.args['regularization']['lens_lambda'], self.args['good_solve'], logger=self.logger) | use arguments to run lens correction
Returns
-------
resolved : renderapi.resolvedtiles.ResolvedTiles
new resolvedtiles object with derived lens correction applied
new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform
derived lens correction transform
jresult : dict
dictionary of solve information | em_stitch/lens_correction/mesh_and_solve_transform.py | solve_resolvedtiles_from_args | AllenInstitute/em_stitch | 2 | python | def solve_resolvedtiles_from_args(self):
'use arguments to run lens correction\n\n Returns\n -------\n resolved : renderapi.resolvedtiles.ResolvedTiles\n new resolvedtiles object with derived lens correction applied\n new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform\n derived lens correction transform\n jresult : dict\n dictionary of solve information\n '
if ('tilespecs' in self.args):
jspecs = self.args['tilespecs']
else:
jspecs = jsongz.load(self.args['tilespec_file'])
self.tilespecs = np.array([renderapi.tilespec.TileSpec(json=j) for j in jspecs])
if ('matches' in self.args):
self.matches = self.args['matches']
else:
self.matches = jsongz.load(self.args['match_file'])
return _solve_resolvedtiles(renderapi.resolvedtiles.ResolvedTiles(tilespecs=self.tilespecs, transformList=[]), self.matches, self.args['nvertex'], self.args['regularization']['default_lambda'], self.args['regularization']['translation_factor'], self.args['regularization']['lens_lambda'], self.args['good_solve'], logger=self.logger) | def solve_resolvedtiles_from_args(self):
'use arguments to run lens correction\n\n Returns\n -------\n resolved : renderapi.resolvedtiles.ResolvedTiles\n new resolvedtiles object with derived lens correction applied\n new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform\n derived lens correction transform\n jresult : dict\n dictionary of solve information\n '
if ('tilespecs' in self.args):
jspecs = self.args['tilespecs']
else:
jspecs = jsongz.load(self.args['tilespec_file'])
self.tilespecs = np.array([renderapi.tilespec.TileSpec(json=j) for j in jspecs])
if ('matches' in self.args):
self.matches = self.args['matches']
else:
self.matches = jsongz.load(self.args['match_file'])
return _solve_resolvedtiles(renderapi.resolvedtiles.ResolvedTiles(tilespecs=self.tilespecs, transformList=[]), self.matches, self.args['nvertex'], self.args['regularization']['default_lambda'], self.args['regularization']['translation_factor'], self.args['regularization']['lens_lambda'], self.args['good_solve'], logger=self.logger)<|docstring|>use arguments to run lens correction
Returns
-------
resolved : renderapi.resolvedtiles.ResolvedTiles
new resolvedtiles object with derived lens correction applied
new_ref_transform : renderapi.transform.leaf.ThinPlateSplineTransform
derived lens correction transform
jresult : dict
dictionary of solve information<|endoftext|> |
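The args dictionary read by solve_resolvedtiles_from_args has the shape below; the key names come from the method body, while the file names and numbers are illustrative assumptions.

args = {
    'tilespec_file': 'tilespecs.json.gz',   # or pass 'tilespecs': [...] inline instead
    'match_file': 'matches.json.gz',        # or pass 'matches': [...] inline instead
    'nvertex': 1000,
    'regularization': {
        'default_lambda': 1e-3,
        'translation_factor': 1e-5,
        'lens_lambda': 1e-3,
    },
    'good_solve': {'error_mean': 0.2, 'error_std': 2.0},
}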
ec7177b489cf300df91b85888be97a8e248b03b76414f5ede1b23ab92e682687 | def colorWipe(self, R, G, B):
'Wipe color across display a pixel at a time.'
color = Color(R, G, B)
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
self.strip.show() | Wipe color across display a pixel at a time. | server/LEDapp.py | colorWipe | ggoupy/Adeept_RaspTank | 2 | python | def colorWipe(self, R, G, B):
color = Color(R, G, B)
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
self.strip.show() | def colorWipe(self, R, G, B):
color = Color(R, G, B)
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
self.strip.show()<|docstring|>Wipe color across display a pixel at a time.<|endoftext|> |
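A usage sketch for the wipe, assuming the rpi_ws281x package that Color and the strip object appear to come from; the LED count and GPIO pin are illustrative. Note that show() runs once after the loop, so despite the docstring the wipe becomes visible all at once rather than pixel by pixel.

from rpi_ws281x import PixelStrip, Color  # assumed dependency

strip = PixelStrip(16, 18)   # 16 LEDs driven from GPIO 18 (illustrative)
strip.begin()
color = Color(255, 0, 0)     # solid red
for i in range(strip.numPixels()):
    strip.setPixelColor(i, color)
strip.show()                 # one refresh pushes the whole wipe out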
6955dec1a1d2ccc348eceba460c235a39d88f3637dbcf5bdf6c7b3d904ca5b46 | def generate_args2cmd(self, args, shell):
'\n Convert request arguments into a command line\n :param args:\n :param shell:\n :return:\n '
if shell:
cmd_args = (self.path if self.path else '')
for (name, value) in args.items():
cmd_args += f' -{name} {shlex.quote(str(value))}'
else:
cmd_args = [self.path]
for (name, value) in args.items():
cmd_args.append(f'-{name}')
cmd_args.append(f'{value}')
return cmd_args | Convert request arguments into a command line
:param args:
:param shell:
:return: | sql/plugins/sqladvisor.py | generate_args2cmd | sosofun123/Archery | 3,458 | python | def generate_args2cmd(self, args, shell):
'\n Convert request arguments into a command line\n :param args:\n :param shell:\n :return:\n '
if shell:
cmd_args = (self.path if self.path else '')
for (name, value) in args.items():
cmd_args += f' -{name} {shlex.quote(str(value))}'
else:
cmd_args = [self.path]
for (name, value) in args.items():
cmd_args.append(f'-{name}')
cmd_args.append(f'{value}')
return cmd_args | def generate_args2cmd(self, args, shell):
'\n Convert request arguments into a command line\n :param args:\n :param shell:\n :return:\n '
if shell:
cmd_args = (self.path if self.path else '')
for (name, value) in args.items():
cmd_args += f' -{name} {shlex.quote(str(value))}'
else:
cmd_args = [self.path]
for (name, value) in args.items():
cmd_args.append(f'-{name}')
cmd_args.append(f'{value}')
return cmd_args<|docstring|>Convert request arguments into a command line
:param args:
:param shell:
:return:<|endoftext|> |
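A sketch of the two return shapes of generate_args2cmd, with an illustrative binary path and argument dict; only the shell=True branch quotes values, since that string is handed to a shell.

import shlex
from types import SimpleNamespace

tool = SimpleNamespace(path='/opt/sqladvisor')   # stand-in for the plugin instance
args = {'h': '127.0.0.1', 'q': 'select 1'}

# shell=True shape: a single quoted command string
cmd = tool.path + ''.join(f' -{k} {shlex.quote(str(v))}' for k, v in args.items())
# "/opt/sqladvisor -h 127.0.0.1 -q 'select 1'"

# shell=False shape: an argv list, safe for subprocess without a shell
argv = [tool.path]
for k, v in args.items():
    argv += [f'-{k}', f'{v}']
# ['/opt/sqladvisor', '-h', '127.0.0.1', '-q', 'select 1']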
18062f8f6efb2665b834e63c4ba451207149f6ad4aefe16cbbe185f4ed547de1 | def main(argv=sys.argv):
'Main method called by the aip.'
try:
utils.vip_main(historian)
except Exception as e:
print(e)
_log.exception('unhandled exception') | Main method called by the aip. | services/core/ForwardHistorian/forwarder/agent.py | main | ashrafulhaque99/BEMOSS3.5 | 73 | python | def main(argv=sys.argv):
try:
utils.vip_main(historian)
except Exception as e:
print(e)
_log.exception('unhandled exception') | def main(argv=sys.argv):
try:
utils.vip_main(historian)
except Exception as e:
print(e)
_log.exception('unhandled exception')<|docstring|>Main method called by the aip.<|endoftext|> |
2fc836bce2d493d2c9e3ddf0d36ee190c6b9c8d8c91358314fe3ffadcb5af942 | @Core.receiver('onstart')
def starting_base(self, sender, **kwargs):
'\n Subscribes to the platform message bus on the actuator, record,\n datalogger, and device topics to capture data.\n '
def subscriber(subscription, callback_method):
_log.debug('subscribing to {}'.format(subscription))
self.vip.pubsub.subscribe(peer='pubsub', prefix=subscription, callback=callback_method)
_log.debug('Starting Forward historian')
for topic_subscriptions in services_topic_list:
subscriber(topic_subscriptions, self.capture_data)
for custom_topic in custom_topic_list:
subscriber(custom_topic, self.capture_data)
self._started = True | Subscribes to the platform message bus on the actuator, record,
datalogger, and device topics to capture data. | services/core/ForwardHistorian/forwarder/agent.py | starting_base | ashrafulhaque99/BEMOSS3.5 | 73 | python | @Core.receiver('onstart')
def starting_base(self, sender, **kwargs):
'\n Subscribes to the platform message bus on the actuator, record,\n datalogger, and device topics to capture data.\n '
def subscriber(subscription, callback_method):
_log.debug('subscribing to {}'.format(subscription))
self.vip.pubsub.subscribe(peer='pubsub', prefix=subscription, callback=callback_method)
_log.debug('Starting Forward historian')
for topic_subscriptions in services_topic_list:
subscriber(topic_subscriptions, self.capture_data)
for custom_topic in custom_topic_list:
subscriber(custom_topic, self.capture_data)
self._started = True | @Core.receiver('onstart')
def starting_base(self, sender, **kwargs):
'\n Subscribes to the platform message bus on the actuator, record,\n datalogger, and device topics to capture data.\n '
def subscriber(subscription, callback_method):
_log.debug('subscribing to {}'.format(subscription))
self.vip.pubsub.subscribe(peer='pubsub', prefix=subscription, callback=callback_method)
_log.debug('Starting Forward historian')
for topic_subscriptions in services_topic_list:
subscriber(topic_subscriptions, self.capture_data)
for custom_topic in custom_topic_list:
subscriber(custom_topic, self.capture_data)
self._started = True<|docstring|>Subscribes to the platform message bus on the actuator, record,
datalogger, and device topics to capture data.<|endoftext|> |
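The two topic lists are module-level globals that this record does not show; a plausible shape, grounded only in how they are iterated, would be plain prefix strings (the values here are assumptions):

services_topic_list = ['devices', 'record', 'datalogger', 'analysis']  # assumed prefixes
custom_topic_list = []  # extra prefixes to forward, empty by default
for prefix in services_topic_list + custom_topic_list:
    print('would subscribe to', prefix)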
fc090acc53b060a2963d3cac7e09d588d273e07ea80e196c3e16a8fb1e3bcff5 | @click.command(name='set-active', context_settings=CONTEXT_SETTINGS)
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, key, secret, adapter_node, adapter_name, cnx_id, active, save_and_fetch, **kwargs):
'Activate/deactivate a connection.'
client = ctx.obj.start_client(url=url, key=key, secret=secret)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
try:
cnx = client.adapters.cnx.get_by_id(adapter_name=adapter_name, adapter_node=adapter_node, cnx_id=cnx_id)
cnx_new = client.adapters.cnx.set_cnx_active(cnx=cnx, value=active, save_and_fetch=save_and_fetch)
ctx.obj.echo_ok(msg='Connection updated successfully!')
except CnxUpdateError as exc:
ctx.obj.echo_error(msg=f'{exc}', abort=False)
cnx_new = exc.cnx_new
handle_export(ctx=ctx, rows=cnx_new, **kwargs) | Activate/deactivate a connection. | axonius_api_client/cli/grp_adapters/grp_cnx/cmd_set_active.py | cmd | kf-careem/axonius_api_client | 11 | python | @click.command(name='set-active', context_settings=CONTEXT_SETTINGS)
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, key, secret, adapter_node, adapter_name, cnx_id, active, save_and_fetch, **kwargs):
client = ctx.obj.start_client(url=url, key=key, secret=secret)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
try:
cnx = client.adapters.cnx.get_by_id(adapter_name=adapter_name, adapter_node=adapter_node, cnx_id=cnx_id)
cnx_new = client.adapters.cnx.set_cnx_active(cnx=cnx, value=active, save_and_fetch=save_and_fetch)
ctx.obj.echo_ok(msg='Connection updated successfully!')
except CnxUpdateError as exc:
ctx.obj.echo_error(msg=f'{exc}', abort=False)
cnx_new = exc.cnx_new
handle_export(ctx=ctx, rows=cnx_new, **kwargs) | @click.command(name='set-active', context_settings=CONTEXT_SETTINGS)
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, key, secret, adapter_node, adapter_name, cnx_id, active, save_and_fetch, **kwargs):
client = ctx.obj.start_client(url=url, key=key, secret=secret)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
try:
cnx = client.adapters.cnx.get_by_id(adapter_name=adapter_name, adapter_node=adapter_node, cnx_id=cnx_id)
cnx_new = client.adapters.cnx.set_cnx_active(cnx=cnx, value=active, save_and_fetch=save_and_fetch)
ctx.obj.echo_ok(msg='Connection updated successfully!')
except CnxUpdateError as exc:
ctx.obj.echo_error(msg=f'{exc}', abort=False)
cnx_new = exc.cnx_new
handle_export(ctx=ctx, rows=cnx_new, **kwargs)<|docstring|>Activate/deactivate a connection.<|endoftext|> |
5a5d070c206483008f365f76cb5300b7a14f2db1be89a20bb2a1e813c2279dc1 | def exchangeRefAlt(vcfLine):
'Exchange ref/alt representation of a vcf line\n Ref <---> alt\n '
ss = vcfLine
(ss[3], ss[4]) = (ss[4], ss[3])
for i in range(seqStart, len(ss)):
ns = list(ss[i])
ns[0] = convertMap[ns[0]]
ns[2] = convertMap[ns[2]]
ss[i] = ''.join(ns)
return ss | Exchange ref/alt representation of a vcf line
Ref <---> alt | GWAS/CorrectRef4VCF/CorrectRef4VCF.py | exchangeRefAlt | wavefancy/BIDMC-PYTHON | 0 | python | def exchangeRefAlt(vcfLine):
'Exchange ref/alt representation of a vcf line\n Ref <---> alt\n '
ss = vcfLine
(ss[3], ss[4]) = (ss[4], ss[3])
for i in range(seqStart, len(ss)):
ns = list(ss[i])
ns[0] = convertMap[ns[0]]
ns[2] = convertMap[ns[2]]
ss[i] = ''.join(ns)
return ss | def exchangeRefAlt(vcfLine):
'Exchange ref/alt representation of a vcf line\n Ref <---> alt\n '
ss = vcfLine
(ss[3], ss[4]) = (ss[4], ss[3])
for i in range(seqStart, len(ss)):
ns = list(ss[i])
ns[0] = convertMap[ns[0]]
ns[2] = convertMap[ns[2]]
ss[i] = ''.join(ns)
return ss<|docstring|>Exchange ref/alt representation of a vcf line
Ref <---> alt<|endoftext|> |
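exchangeRefAlt leans on two module-level globals that this record does not show; the definitions below are assumptions consistent with how the function indexes genotype strings, followed by an illustrative VCF line. Note that the function mutates vcfLine in place and returns the same list.

convertMap = {'0': '1', '1': '0', '.': '.'}  # assumed allele-code swap table
seqStart = 9  # assumed: first sample column of a VCF record (0-based)

line = ['1', '12345', 'rs1', 'A', 'G', '50', 'PASS', '.', 'GT', '0/1', '1/1']
swapped = exchangeRefAlt(line)
# REF/ALT become 'G'/'A'; genotypes '0/1' -> '1/0' and '1/1' -> '0/0'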
db8682f0d1e48dcfcae3f28f82fe77aff3679f1e072972d3df24c17198ca312e | def getOutputArray(vcfLine, refAllele):
'Check consistency of a vcf line, exchange ref/alt if necessary.\n return corrected vcfLine, [] if exchange also failed.\n '
out = []
ref = vcfLine[3].upper()
alt = vcfLine[4].upper()
if (refAllele.upper() == ref):
out = vcfLine
elif (refAllele.upper() == alt):
out = exchangeRefAlt(vcfLine)
else:
out = []
return out | Check consistency of a vcf line, exchange ref/alt if necessary.
return corrected vcfLine, [] if exchange also failed. | GWAS/CorrectRef4VCF/CorrectRef4VCF.py | getOutputArray | wavefancy/BIDMC-PYTHON | 0 | python | def getOutputArray(vcfLine, refAllele):
'Check consistency of a vcf line, exchange ref/alt if necessary.\n return corrected vcfLine, [] if exchange also failed.\n '
out = []
ref = vcfLine[3].upper()
alt = vcfLine[4].upper()
if (refAllele.upper() == ref):
out = vcfLine
elif (refAllele.upper() == alt):
out = exchangeRefAlt(vcfLine)
else:
out = []
return out | def getOutputArray(vcfLine, refAllele):
'Check consistency of a vcf line, exchange ref/alt if necessary.\n return corrected vcfLine, [] if exchange also failed.\n '
out = []
ref = vcfLine[3].upper()
alt = vcfLine[4].upper()
if (refAllele.upper() == ref):
out = vcfLine
elif (refAllele.upper() == alt):
out = exchangeRefAlt(vcfLine)
else:
out = []
return out<|docstring|>Check consistency of a vcf line, exchange ref/alt if necessary.
return corrected vcfLine, [] if exchange also failed.<|endoftext|> |
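An illustrative check of the three branches (each call gets a fresh copy of the line, since exchangeRefAlt mutates its argument in place):

vcf = ['1', '12345', 'rs1', 'A', 'G', '50', 'PASS', '.', 'GT', '0/1']
getOutputArray(list(vcf), 'A')  # matches REF -> line returned unchanged
getOutputArray(list(vcf), 'G')  # matches ALT -> REF/ALT exchanged
getOutputArray(list(vcf), 'T')  # no match -> [] (site cannot be reconciled)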
ba95ff4eaf7927022abbdb106ccacfb04d92ddbc229947c62c8f21b078d3ed3e | def get_label(self, name):
'\n\t\tReturns a label JSON-encoded object\n\t\t'
labels = self.api.get_labels()
for label in labels:
if (label.name == name):
return label
return None | Returns a label JSON-encoded object | lib/ics2doist.py | get_label | jamesbrond/ics2doist | 0 | python | def get_label(self, name):
'\n\t\t\n\t\t'
labels = self.api.get_labels()
for label in labels:
if (label.name == name):
return label
return None | def get_label(self, name):
'\n\t\t\n\t\t'
labels = self.api.get_labels()
for label in labels:
if (label.name == name):
return label
return None<|docstring|>Returns a label JSON-encoded object<|endoftext|> |
6220712118f5d8109093449d64be839be106f0bcdea6a86291feb7c1cd2685b0 | def label_ids(self, label_name):
"\n\t\tGet label id. If label doesn't exitst it will create it\n\t\t"
l = None
if label_name:
l = self.get_label(label_name)
if (l is None):
logging.debug(f'create new label {label_name}')
l = self.api.add_label({'name': label_name})
if (l != None):
l = [l.id]
return l | Get label id. If the label doesn't exist, it will be created | lib/ics2doist.py | label_ids | jamesbrond/ics2doist | 0 | python | def label_ids(self, label_name):
"\n\t\t\n\t\t"
l = None
if label_name:
l = self.get_label(label_name)
if (l is None):
logging.debug(f'create new label {label_name}')
l = self.api.add_label({'name': label_name})
if (l != None):
l = [l.id]
return l | def label_ids(self, label_name):
"\n\t\t\n\t\t"
l = None
if label_name:
l = self.get_label(label_name)
if (l is None):
logging.debug(f'create new label {label_name}')
l = self.api.add_label({'name': label_name})
if (l != None):
l = [l.id]
return l<|docstring|>Get label id. If the label doesn't exist, it will be created<|endoftext|>
71a531a51ac772c7d8547c2f5cea9cc78c896fa4ec07ec8fb12cffc0a1f00329 | def javascript_confirm(url, js_msg, abort_on, *, escape_msg=True):
'Display a javascript confirm prompt.'
log.js.debug('confirm: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
ans = message.ask('Javascript confirm', msg, mode=usertypes.PromptMode.yesno, abort_on=abort_on, url=urlstr)
return bool(ans) | Display a javascript confirm prompt. | luminos/browser/Shared.py | javascript_confirm | linuxaddict89/luminos | 0 | python | def javascript_confirm(url, js_msg, abort_on, *, escape_msg=True):
log.js.debug('confirm: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
ans = message.ask('Javascript confirm', msg, mode=usertypes.PromptMode.yesno, abort_on=abort_on, url=urlstr)
return bool(ans) | def javascript_confirm(url, js_msg, abort_on, *, escape_msg=True):
log.js.debug('confirm: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
ans = message.ask('Javascript confirm', msg, mode=usertypes.PromptMode.yesno, abort_on=abort_on, url=urlstr)
return bool(ans)<|docstring|>Display a javascript confirm prompt.<|endoftext|> |
59b77ee7d39d0c0726fa2813b07f013b5470cb3c5a2646a2b01336207d874df4 | def javascript_prompt(url, js_msg, default, abort_on, *, escape_msg=True):
'Display a javascript prompt.'
log.js.debug('prompt: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = '<b>{}</b> asks:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
answer = message.ask('Javascript prompt', msg, mode=usertypes.PromptMode.text, default=default, abort_on=abort_on, url=urlstr)
if (answer is None):
return (False, '')
else:
return (True, answer) | Display a javascript prompt. | luminos/browser/Shared.py | javascript_prompt | linuxaddict89/luminos | 0 | python | def javascript_prompt(url, js_msg, default, abort_on, *, escape_msg=True):
log.js.debug('prompt: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = '<b>{}</b> asks:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
answer = message.ask('Javascript prompt', msg, mode=usertypes.PromptMode.text, default=default, abort_on=abort_on, url=urlstr)
if (answer is None):
return (False, '')
else:
return (True, answer) | def javascript_prompt(url, js_msg, default, abort_on, *, escape_msg=True):
log.js.debug('prompt: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = '<b>{}</b> asks:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
answer = message.ask('Javascript prompt', msg, mode=usertypes.PromptMode.text, default=default, abort_on=abort_on, url=urlstr)
if (answer is None):
return (False, '')
else:
return (True, answer)<|docstring|>Display a javascript prompt.<|endoftext|> |
bf1a19c77458501d312efe0856ed6f5baef31895e6a918315c94705e56d24112 | def javascript_alert(url, js_msg, abort_on, *, escape_msg=True):
'Display a javascript alert.'
log.js.debug('alert: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
message.ask('Javascript alert', msg, mode=usertypes.PromptMode.alert, abort_on=abort_on, url=urlstr) | Display a javascript alert. | luminos/browser/Shared.py | javascript_alert | linuxaddict89/luminos | 0 | python | def javascript_alert(url, js_msg, abort_on, *, escape_msg=True):
log.js.debug('alert: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
message.ask('Javascript alert', msg, mode=usertypes.PromptMode.alert, abort_on=abort_on, url=urlstr) | def javascript_alert(url, js_msg, abort_on, *, escape_msg=True):
log.js.debug('alert: {}'.format(js_msg))
js_msg = (html.escape(js_msg) if escape_msg else js_msg)
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), js_msg)
urlstr = url.toString((QUrl.RemovePassword | QUrl.FullyEncoded))
message.ask('Javascript alert', msg, mode=usertypes.PromptMode.alert, abort_on=abort_on, url=urlstr)<|docstring|>Display a javascript alert.<|endoftext|> |
d99336a87df5e544c8c6f58984b40c7af0527d7ea1a7b8a7dae9d0cc662d50f9 | def javascript_log_message(level, source, line, msg):
'Display a JavaScript log message.'
logstring = '[{}:{}] {}'.format(source, line, msg)
logger = None
if (level == usertypes.JsLogLevel.info):
logger = log.js.info
if (level == usertypes.JsLogLevel.warning):
logger = log.js.warning
if (level == usertypes.JsLogLevel.error):
logger = log.js.error
logger(logstring) | Display a JavaScript log message. | luminos/browser/Shared.py | javascript_log_message | linuxaddict89/luminos | 0 | python | def javascript_log_message(level, source, line, msg):
logstring = '[{}:{}] {}'.format(source, line, msg)
logger = None
if (level == usertypes.JsLogLevel.info):
logger = log.js.info
if (level == usertypes.JsLogLevel.warning):
logger = log.js.warning
if (level == usertypes.JsLogLevel.error):
logger = log.js.error
logger(logstring) | def javascript_log_message(level, source, line, msg):
logstring = '[{}:{}] {}'.format(source, line, msg)
logger = None
if (level == usertypes.JsLogLevel.info):
logger = log.js.info
if (level == usertypes.JsLogLevel.warning):
logger = log.js.warning
if (level == usertypes.JsLogLevel.error):
logger = log.js.error
logger(logstring)<|docstring|>Display a JavaScript log message.<|endoftext|> |
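The if-chain selects one logger per level; a table-driven sketch of the same dispatch (assuming the same usertypes and log modules) keeps the mapping in one place and raises KeyError on an unknown level instead of calling None:

_JS_LOGGERS = {
    usertypes.JsLogLevel.info: log.js.info,
    usertypes.JsLogLevel.warning: log.js.warning,
    usertypes.JsLogLevel.error: log.js.error,
}

def javascript_log_message(level, source, line, msg):
    _JS_LOGGERS[level]('[{}:{}] {}'.format(source, line, msg))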
cdd75aac2fb251eae0e248f2409eaf9025dd3127d8aeb299467a3ae383ecb069 | def default_units(self, kwargs):
'\n Return the unit value and the default units specified\n from the given keyword arguments dictionary.\n '
aliases = self.get_aliases()
laliases = self.get_lowercase_aliases()
units = self.get_units()
val = 0.0
default_unit = self.STANDARD_UNIT
for (unit, value) in kwargs.items():
if (unit in units):
val = self._convert_value_from(units[unit], value)
default_unit = unit
elif (unit in aliases):
u = aliases[unit]
val = self._convert_value_from(units[u], value)
default_unit = u
else:
lower = unit.lower()
if (lower in units):
val = self._convert_value_from(units[lower], value)
default_unit = lower
elif (lower in laliases):
u = laliases[lower]
val = self._convert_value_from(units[u], value)
default_unit = u
else:
raise AttributeError(('Unknown unit type: %s' % unit))
return (val, default_unit) | Return the unit value and the default units specified
from the given keyword arguments dictionary. | env/Lib/site-packages/measurement/base.py | default_units | UtkarshR8j/Jeeva-server | 2 | python | def default_units(self, kwargs):
'\n Return the unit value and the default units specified\n from the given keyword arguments dictionary.\n '
aliases = self.get_aliases()
laliases = self.get_lowercase_aliases()
units = self.get_units()
val = 0.0
default_unit = self.STANDARD_UNIT
for (unit, value) in kwargs.items():
if (unit in units):
val = self._convert_value_from(units[unit], value)
default_unit = unit
elif (unit in aliases):
u = aliases[unit]
val = self._convert_value_from(units[u], value)
default_unit = u
else:
lower = unit.lower()
if (lower in units):
val = self._convert_value_from(units[lower], value)
default_unit = lower
elif (lower in laliases):
u = laliases[lower]
val = self._convert_value_from(units[u], value)
default_unit = u
else:
raise AttributeError(('Unknown unit type: %s' % unit))
return (val, default_unit) | def default_units(self, kwargs):
'\n Return the unit value and the default units specified\n from the given keyword arguments dictionary.\n '
aliases = self.get_aliases()
laliases = self.get_lowercase_aliases()
units = self.get_units()
val = 0.0
default_unit = self.STANDARD_UNIT
for (unit, value) in kwargs.items():
if (unit in units):
val = self._convert_value_from(units[unit], value)
default_unit = unit
elif (unit in aliases):
u = aliases[unit]
val = self._convert_value_from(units[u], value)
default_unit = u
else:
lower = unit.lower()
if (lower in units):
val = self._convert_value_from(units[lower], value)
default_unit = lower
elif (lower in laliases):
u = laliases[lower]
val = self._convert_value_from(units[u], value)
default_unit = u
else:
raise AttributeError(('Unknown unit type: %s' % unit))
return (val, default_unit)<|docstring|>Return the unit value and the default units specified
from the given keyword arguments dictionary.<|endoftext|> |
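An illustrative use through a concrete measure, assuming the measurement package's Distance class that builds on this base:

from measurement.measures import Distance

d = Distance(mi=1)   # 'mi' is resolved by default_units to a known unit
print(d.km)          # ~1.609344, converted through the standard unit
# Distance(blorp=1)  # would raise AttributeError: Unknown unit type: blorp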
864117164ff6e38e6c5a69f3c2833476d10088b5640a46b5713152db6db15716 | @classmethod
def unit_attname(cls, unit_str):
"\n Retrieves the unit attribute name for the given unit string.\n For example, if the given unit string is 'metre', 'm' would be returned.\n An exception is raised if an attribute cannot be found.\n "
laliases = cls.get_lowercase_aliases()
units = cls.get_units()
lower = unit_str.lower()
if (unit_str in units):
return unit_str
elif (lower in units):
return lower
elif (lower in laliases):
return laliases[lower]
else:
raise Exception(('Could not find a unit keyword associated with "%s"' % (unit_str,))) | Retrieves the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', 'm' would be returned.
An exception is raised if an attribute cannot be found. | env/Lib/site-packages/measurement/base.py | unit_attname | UtkarshR8j/Jeeva-server | 2 | python | @classmethod
def unit_attname(cls, unit_str):
"\n Retrieves the unit attribute name for the given unit string.\n For example, if the given unit string is 'metre', 'm' would be returned.\n An exception is raised if an attribute cannot be found.\n "
laliases = cls.get_lowercase_aliases()
units = cls.get_units()
lower = unit_str.lower()
if (unit_str in units):
return unit_str
elif (lower in units):
return lower
elif (lower in laliases):
return laliases[lower]
else:
raise Exception(('Could not find a unit keyword associated with "%s"' % (unit_str,))) | @classmethod
def unit_attname(cls, unit_str):
"\n Retrieves the unit attribute name for the given unit string.\n For example, if the given unit string is 'metre', 'm' would be returned.\n An exception is raised if an attribute cannot be found.\n "
laliases = cls.get_lowercase_aliases()
units = cls.get_units()
lower = unit_str.lower()
if (unit_str in units):
return unit_str
elif (lower in units):
return lower
elif (lower in laliases):
return laliases[lower]
else:
raise Exception(('Could not find a unit keyword associated with "%s"' % (unit_str,)))<|docstring|>Retrieves the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', 'm' would be returned.
An exception is raised if an attribute cannot be found.<|endoftext|> |
aa433758909c43fc57f62a23fda8d2568bf0267adb11310bb05039595136d1fa | def get_l2_fn(target_nt_names, target_opname, arg_nts, arg_name, empty_seq_name, is_const):
"Generate L2 function name from IMM NT names list and EOSZ NT names list.\n\n Each L2 function is defined by a single PATTERN row in xed's grammar.\n (By pattern's IMM-binding and EOSZ-binding NTs)\n Hence, it is enough to know the IMM NTs sequence and EOSZ NTs sequence to\n define a L2 function. Or in this case to define a L2 function name.\n\n ATTENTION: as we decided to hardcode special AMD's double immediate\n instruction's L1 functions, the length of imm_nt_names can be ONLY 1\n\n @param imm_nt_names: list of IMM-binding NT names\n @param eosz_nt_names: list of EOSZ-binding NT names\n\n @return: L2 function name\n\n "
if (len(target_nt_names) == 0):
return empty_seq_name
if (len(target_nt_names) > 1):
ildutil.ild_err(('Cannot generate L2 function name for NT seq %s' % target_nt_names))
if is_const:
arg_suffix = _arg_const_suffix
else:
arg_suffix = '_'.join((arg_nts + [arg_name]))
l3_prefix = ild_nt.get_lufn(target_nt_names, target_opname)
return (l3_prefix + ('_%s_l2' % arg_suffix)) | Generate L2 function name from IMM NT names list and EOSZ NT names list.
Each L2 function is defined by a single PATTERN row in xed's grammar.
(By pattern's IMM-binding and EOSZ-binding NTs)
Hence, it is enough to know the IMM NTs sequence and EOSZ NTs sequence to
define a L2 function. Or in this case to define a L2 function name.
ATTENTION: as we decided to hardcode special AMD's double immediate
instruction's L1 functions, the length of imm_nt_names can be ONLY 1
@param imm_nt_names: list of IMM-binding NT names
@param eosz_nt_names: list of EOSZ-binding NT names
@return: L2 function name | pysrc/ild_codegen.py | get_l2_fn | javiereguiluz/xed | 1,261 | python | def get_l2_fn(target_nt_names, target_opname, arg_nts, arg_name, empty_seq_name, is_const):
"Generate L2 function name from IMM NT names list and EOSZ NT names list.\n\n Each L2 function is defined by a single PATTERN row in xed's grammar.\n (By pattern's IMM-binding and EOSZ-binding NTs)\n Hence, it is enough to know the IMM NTs sequence and EOSZ NTs sequence to\n define a L2 function. Or in this case to define a L2 function name.\n\n ATTENTION: as we decided to hardcode special AMD's double immediate\n instruction's L1 functions, the length of imm_nt_names can be ONLY 1\n\n @param imm_nt_names: list of IMM-binding NT names\n @param eosz_nt_names: list of EOSZ-binding NT names\n\n @return: L2 function name\n\n "
if (len(target_nt_names) == 0):
return empty_seq_name
if (len(target_nt_names) > 1):
ildutil.ild_err(('Cannot generate L2 function name for NT seq %s' % target_nt_names))
if is_const:
arg_suffix = _arg_const_suffix
else:
arg_suffix = '_'.join((arg_nts + [arg_name]))
l3_prefix = ild_nt.get_lufn(target_nt_names, target_opname)
return (l3_prefix + ('_%s_l2' % arg_suffix)) | def get_l2_fn(target_nt_names, target_opname, arg_nts, arg_name, empty_seq_name, is_const):
"Generate L2 function name from IMM NT names list and EOSZ NT names list.\n\n Each L2 function is defined by a single PATTERN row in xed's grammar.\n (By pattern's IMM-binding and EOSZ-binding NTs)\n Hence, it is enough to know the IMM NTs sequence and EOSZ NTs sequence to\n define a L2 function. Or in this case to define a L2 function name.\n\n ATTENTION: as we decided to hardcode special AMD's double immediate\n instruction's L1 functions, the length of imm_nt_names can be ONLY 1\n\n @param imm_nt_names: list of IMM-binding NT names\n @param eosz_nt_names: list of EOSZ-binding NT names\n\n @return: L2 function name\n\n "
if (len(target_nt_names) == 0):
return empty_seq_name
if (len(target_nt_names) > 1):
ildutil.ild_err(('Cannot generate L2 function name for NT seq %s' % target_nt_names))
if is_const:
arg_suffix = _arg_const_suffix
else:
arg_suffix = '_'.join((arg_nts + [arg_name]))
l3_prefix = ild_nt.get_lufn(target_nt_names, target_opname)
return (l3_prefix + ('_%s_l2' % arg_suffix))<|docstring|>Generate L2 function name from IMM NT names list and EOSZ NT names list.
Each L2 function is defined by a single PATTERN row in xed's grammar.
(By pattern's IMM-binding and EOSZ-binding NTs)
Hence, it is enough to know the IMM NTs sequence and EOSZ NTs sequence to
define a L2 function. Or in this case to define a L2 function name.
ATTENTION: as we decided to hardcode special AMD's double immediate
instruction's L1 functions, the length of imm_nt_names can be ONLY 1
@param imm_nt_names: list of IMM-binding NT names
@param eosz_nt_names: list of EOSZ-binding NT names
@return: L2 function name<|endoftext|> |
fa9c2170abbe98f6fb700fbfeeea41f9819c17346baa15c588f900f4de58ac30 | def gen_l2_func_list(agi, target_nt_dict, arg_nt_dict, ild_t_member):
'generate L2 functions'
l2_func_list = []
for (nt_name, array) in sorted(target_nt_dict.items()):
target_opname = array.get_target_opname()
if array.is_const_lookup_fun():
fo = gen_const_l2_function(agi, nt_name, target_opname, ild_t_member)
l2_func_list.append(fo)
else:
for (arg_nt_seq, arg_arr) in sorted(arg_nt_dict.items()):
fo = gen_scalable_l2_function(agi, nt_name, target_opname, ild_t_member, arg_arr, list(arg_nt_seq))
l2_func_list.append(fo)
return l2_func_list | generate L2 functions | pysrc/ild_codegen.py | gen_l2_func_list | javiereguiluz/xed | 1,261 | python | def gen_l2_func_list(agi, target_nt_dict, arg_nt_dict, ild_t_member):
l2_func_list = []
for (nt_name, array) in sorted(target_nt_dict.items()):
target_opname = array.get_target_opname()
if array.is_const_lookup_fun():
fo = gen_const_l2_function(agi, nt_name, target_opname, ild_t_member)
l2_func_list.append(fo)
else:
for (arg_nt_seq, arg_arr) in sorted(arg_nt_dict.items()):
fo = gen_scalable_l2_function(agi, nt_name, target_opname, ild_t_member, arg_arr, list(arg_nt_seq))
l2_func_list.append(fo)
return l2_func_list | def gen_l2_func_list(agi, target_nt_dict, arg_nt_dict, ild_t_member):
l2_func_list = []
for (nt_name, array) in sorted(target_nt_dict.items()):
target_opname = array.get_target_opname()
if array.is_const_lookup_fun():
fo = gen_const_l2_function(agi, nt_name, target_opname, ild_t_member)
l2_func_list.append(fo)
else:
for (arg_nt_seq, arg_arr) in sorted(arg_nt_dict.items()):
fo = gen_scalable_l2_function(agi, nt_name, target_opname, ild_t_member, arg_arr, list(arg_nt_seq))
l2_func_list.append(fo)
return l2_func_list<|docstring|>generate L2 functions<|endoftext|> |
ef53ebd13ed86f0640abb9628d82e6e98e782d7c148d8f015ae72210a741e6b7 | def _test_map_all_zero(vv, phash_map_lu):
'phash_map_lu is a dict[maps][0...255] pointing to a 2nd level\n lookup or it might be None indicating an empty map.'
all_zero_map = collections.defaultdict(bool)
for xmap in phash_map_lu.keys():
omap = phash_map_lu[xmap]
if (omap == None):
all_zero_map[xmap] = True
mbuild.msgb('ALL ZEROS', 'VV={} MAP={}'.format(vv, xmap))
return all_zero_map | phash_map_lu is a dict[maps][0...255] pointing to a 2nd level
lookup or it might be None indicating an empty map. | pysrc/ild_codegen.py | _test_map_all_zero | javiereguiluz/xed | 1,261 | python | def _test_map_all_zero(vv, phash_map_lu):
'phash_map_lu is a dict[maps][0...255] pointing to a 2nd level\n lookup or it might be None indicating an empty map.'
all_zero_map = collections.defaultdict(bool)
for xmap in phash_map_lu.keys():
omap = phash_map_lu[xmap]
if (omap == None):
all_zero_map[xmap] = True
mbuild.msgb('ALL ZEROS', 'VV={} MAP={}'.format(vv, xmap))
return all_zero_map | def _test_map_all_zero(vv, phash_map_lu):
'phash_map_lu is a dict[maps][0...255] pointing to a 2nd level\n lookup or it might be None indicating an empty map.'
all_zero_map = collections.defaultdict(bool)
for xmap in phash_map_lu.keys():
omap = phash_map_lu[xmap]
if (omap == None):
all_zero_map[xmap] = True
mbuild.msgb('ALL ZEROS', 'VV={} MAP={}'.format(vv, xmap))
return all_zero_map<|docstring|>phash_map_lu is a dict[maps][0...255] pointing to a 2nd level
lookup or it might be None indicating an empty map.<|endoftext|> |
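A tiny illustration of the contract, with made-up map names: a None second-level table marks a map all-zero, and the defaultdict(bool) means unlisted maps read back as False.

phash_map_lu = {'0x0F': {0x10: 'xed3_phash_find_example'},  # made-up entry
                '0x0F38': None}                             # empty map
flags = _test_map_all_zero('1', phash_map_lu)
assert flags['0x0F38'] is True
assert flags['0x0F'] is False  # maps with real content stay False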
9e28c0efd4e8f861613a6af9072f93c6ae0eca98981482f69cef5d756dd234c8 | def gen_static_decode(agi, vv_lu, op_lu_list, h_fn='xed3-phash.h'):
'generate static decoder'
phash_headers = ['xed-ild-eosz-getters.h', 'xed-ild-easz-getters.h', 'xed-internal-header.h', 'xed-ild-private.h']
maplu_headers = []
all_zero_by_map = {}
for vv in sorted(vv_lu.keys()):
(phash_map_lu, lu_fo_list) = vv_lu[vv]
all_zero_by_map[vv] = _test_map_all_zero(vv, phash_map_lu)
pheader = 'xed3-phash-vv{}.h'.format(vv)
dump_flist_2_header(agi, pheader, ['xed3-operand-lu.h'], lu_fo_list)
map_lu_cfn = 'xed3-phash-lu-vv{}.c'.format(vv)
map_lu_hfn = 'xed3-phash-lu-vv{}.h'.format(vv)
maplu_headers.append(map_lu_hfn)
name_pfx = 'xed3_phash_vv{}'.format(vv)
elem_type = 'xed3_find_func_t'
dump_lookup(agi, phash_map_lu, name_pfx, map_lu_cfn, [pheader], elem_type, output_dir=None, all_zero_by_map=all_zero_by_map[vv])
h_file = agi.open_file(mbuild.join('include-private', map_lu_hfn), start=False)
h_file.start()
for insn_map in sorted(phash_map_lu.keys()):
arr_name = _get_map_lu_name(name_pfx, insn_map)
if all_zero_by_map[vv][insn_map]:
pass
else:
h_file.add_code('extern const {} {}[256];'.format(elem_type, arr_name))
h_file.close()
hdr = 'xed3-operand-lu.h'
dump_flist_2_header(agi, hdr, phash_headers, op_lu_list, emit_bodies=False)
dump_flist_2_header(agi, 'xed3-operand-lu.c', [hdr], op_lu_list, is_private=False, emit_headers=False)
h_file = agi.open_file(mbuild.join('include-private', h_fn), start=False)
for header in maplu_headers:
h_file.add_header(header)
h_file.start()
maps = ild_info.get_maps(agi)
vv_num = [int(x) for x in vv_lu.keys()]
vv_max = (max(vv_num) + 1)
max_maps = (ild_info.get_maps_max_id(agi) + 1)
arr_name = 'xed3_phash_lu'
h_file.add_code('#define XED_PHASH_MAP_LIMIT {}'.format(max_maps))
h_file.add_code('const xed3_find_func_t* {}[{}][XED_PHASH_MAP_LIMIT] = {{'.format(arr_name, vv_max))
for vv in range(0, vv_max):
maps = ild_info.get_maps_for_space(agi, vv)
dmap = {mi.map_id: mi for mi in maps}
init_vals = (['0'] * max_maps)
for imap in range(0, max_maps):
if (imap in dmap):
mi = dmap[imap]
if all_zero_by_map[str(vv)][mi.map_name]:
init_vals[imap] = '0'
else:
init_vals[imap] = _get_map_lu_name('xed3_phash_vv{}'.format(vv), mi.map_name)
h_file.add_code('{{ {} }},'.format(', '.join(init_vals)))
h_file.add_code('};')
h_file.close() | generate static decoder | pysrc/ild_codegen.py | gen_static_decode | javiereguiluz/xed | 1,261 | python | def gen_static_decode(agi, vv_lu, op_lu_list, h_fn='xed3-phash.h'):
phash_headers = ['xed-ild-eosz-getters.h', 'xed-ild-easz-getters.h', 'xed-internal-header.h', 'xed-ild-private.h']
maplu_headers = []
all_zero_by_map = {}
for vv in sorted(vv_lu.keys()):
(phash_map_lu, lu_fo_list) = vv_lu[vv]
all_zero_by_map[vv] = _test_map_all_zero(vv, phash_map_lu)
pheader = 'xed3-phash-vv{}.h'.format(vv)
dump_flist_2_header(agi, pheader, ['xed3-operand-lu.h'], lu_fo_list)
map_lu_cfn = 'xed3-phash-lu-vv{}.c'.format(vv)
map_lu_hfn = 'xed3-phash-lu-vv{}.h'.format(vv)
maplu_headers.append(map_lu_hfn)
name_pfx = 'xed3_phash_vv{}'.format(vv)
elem_type = 'xed3_find_func_t'
dump_lookup(agi, phash_map_lu, name_pfx, map_lu_cfn, [pheader], elem_type, output_dir=None, all_zero_by_map=all_zero_by_map[vv])
h_file = agi.open_file(mbuild.join('include-private', map_lu_hfn), start=False)
h_file.start()
for insn_map in sorted(phash_map_lu.keys()):
arr_name = _get_map_lu_name(name_pfx, insn_map)
if all_zero_by_map[vv][insn_map]:
pass
else:
h_file.add_code('extern const {} {}[256];'.format(elem_type, arr_name))
h_file.close()
hdr = 'xed3-operand-lu.h'
dump_flist_2_header(agi, hdr, phash_headers, op_lu_list, emit_bodies=False)
dump_flist_2_header(agi, 'xed3-operand-lu.c', [hdr], op_lu_list, is_private=False, emit_headers=False)
h_file = agi.open_file(mbuild.join('include-private', h_fn), start=False)
for header in maplu_headers:
h_file.add_header(header)
h_file.start()
maps = ild_info.get_maps(agi)
vv_num = [int(x) for x in vv_lu.keys()]
vv_max = (max(vv_num) + 1)
max_maps = (ild_info.get_maps_max_id(agi) + 1)
arr_name = 'xed3_phash_lu'
h_file.add_code('#define XED_PHASH_MAP_LIMIT {}'.format(max_maps))
h_file.add_code('const xed3_find_func_t* {}[{}][XED_PHASH_MAP_LIMIT] = {{'.format(arr_name, vv_max))
for vv in range(0, vv_max):
maps = ild_info.get_maps_for_space(agi, vv)
dmap = {mi.map_id: mi for mi in maps}
init_vals = (['0'] * max_maps)
for imap in range(0, max_maps):
if (imap in dmap):
mi = dmap[imap]
if all_zero_by_map[str(vv)][mi.map_name]:
init_vals[imap] = '0'
else:
init_vals[imap] = _get_map_lu_name('xed3_phash_vv{}'.format(vv), mi.map_name)
h_file.add_code('{{ {} }},'.format(', '.join(init_vals)))
h_file.add_code('};')
h_file.close() | def gen_static_decode(agi, vv_lu, op_lu_list, h_fn='xed3-phash.h'):
phash_headers = ['xed-ild-eosz-getters.h', 'xed-ild-easz-getters.h', 'xed-internal-header.h', 'xed-ild-private.h']
maplu_headers = []
all_zero_by_map = {}
for vv in sorted(vv_lu.keys()):
(phash_map_lu, lu_fo_list) = vv_lu[vv]
all_zero_by_map[vv] = _test_map_all_zero(vv, phash_map_lu)
pheader = 'xed3-phash-vv{}.h'.format(vv)
dump_flist_2_header(agi, pheader, ['xed3-operand-lu.h'], lu_fo_list)
map_lu_cfn = 'xed3-phash-lu-vv{}.c'.format(vv)
map_lu_hfn = 'xed3-phash-lu-vv{}.h'.format(vv)
maplu_headers.append(map_lu_hfn)
name_pfx = 'xed3_phash_vv{}'.format(vv)
elem_type = 'xed3_find_func_t'
dump_lookup(agi, phash_map_lu, name_pfx, map_lu_cfn, [pheader], elem_type, output_dir=None, all_zero_by_map=all_zero_by_map[vv])
h_file = agi.open_file(mbuild.join('include-private', map_lu_hfn), start=False)
h_file.start()
for insn_map in sorted(phash_map_lu.keys()):
arr_name = _get_map_lu_name(name_pfx, insn_map)
if all_zero_by_map[vv][insn_map]:
pass
else:
h_file.add_code('extern const {} {}[256];'.format(elem_type, arr_name))
h_file.close()
hdr = 'xed3-operand-lu.h'
dump_flist_2_header(agi, hdr, phash_headers, op_lu_list, emit_bodies=False)
dump_flist_2_header(agi, 'xed3-operand-lu.c', [hdr], op_lu_list, is_private=False, emit_headers=False)
h_file = agi.open_file(mbuild.join('include-private', h_fn), start=False)
for header in maplu_headers:
h_file.add_header(header)
h_file.start()
maps = ild_info.get_maps(agi)
vv_num = [int(x) for x in vv_lu.keys()]
vv_max = (max(vv_num) + 1)
max_maps = (ild_info.get_maps_max_id(agi) + 1)
arr_name = 'xed3_phash_lu'
h_file.add_code('#define XED_PHASH_MAP_LIMIT {}'.format(max_maps))
h_file.add_code('const xed3_find_func_t* {}[{}][XED_PHASH_MAP_LIMIT] = {{'.format(arr_name, vv_max))
for vv in range(0, vv_max):
maps = ild_info.get_maps_for_space(agi, vv)
dmap = {mi.map_id: mi for mi in maps}
init_vals = (['0'] * max_maps)
for imap in range(0, max_maps):
if (imap in dmap):
mi = dmap[imap]
if all_zero_by_map[str(vv)][mi.map_name]:
init_vals[imap] = '0'
else:
init_vals[imap] = _get_map_lu_name('xed3_phash_vv{}'.format(vv), mi.map_name)
h_file.add_code('{{ {} }},'.format(', '.join(init_vals)))
h_file.add_code('};')
h_file.close()<|docstring|>generate static decoder<|endoftext|> |
12b3302868c6e26c0fe498878eed717e7597e9ae3d241f3bc093f7043c54af24 | def dump_lookup(agi, l1_lookup, name_pfx, lu_h_fn, headers, lu_elem_type, define_dict=None, all_zero_by_map=None, output_dir='include-private'):
"Dump the lookup tables - from opcode value to\n the L1 function pointers (in most cases they are L2 function pointers,\n which doesn't matter, because they have the same signature)\n @param l1_lookup: 2D dict so that\n l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)\n all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F\n map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set\n to some string indicating that L1 function is undefined.\n\n all_zero_by_map is an optional dict[map] -> {True,False}. If True\n skip emitting the map.\n \n return a dictionary of the array names generated. "
if output_dir:
ofn = mbuild.join(output_dir, lu_h_fn)
else:
ofn = lu_h_fn
h_file = agi.open_file(ofn, start=False)
for header in headers:
h_file.add_header(header)
h_file.start()
if define_dict:
print_defines(h_file, define_dict)
array_names = _dump_lookup_low(agi, h_file, l1_lookup, name_pfx, lu_elem_type, all_zero_by_map)
h_file.close()
return array_names | Dump the lookup tables - from opcode value to
the L1 function pointers (in most cases they are L2 function pointers,
which doesn't matter, because they have the same signature)
@param l1_lookup: 2D dict so that
l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)
all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F
map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set
to some string indicating that L1 function is undefined.
all_zero_by_map is an optional dict[map] -> {True,False}. If True
skip emitting the map.
return a dictionary of the array names generated. | pysrc/ild_codegen.py | dump_lookup | javiereguiluz/xed | 1,261 | python | def dump_lookup(agi, l1_lookup, name_pfx, lu_h_fn, headers, lu_elem_type, define_dict=None, all_zero_by_map=None, output_dir='include-private'):
"Dump the lookup tables - from opcode value to\n the L1 function pointers (in most cases they are L2 function pointers,\n which doesn't matter, because they have the same signature)\n @param l1_lookup: 2D dict so that\n l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)\n all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F\n map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set\n to some string indicating that L1 function is undefined.\n\n all_zero_by_map is an optional dict[map] -> {True,False}. If True\n skip emitting the map.\n \n return a dictionary of the array names generated. "
if output_dir:
ofn = mbuild.join(output_dir, lu_h_fn)
else:
ofn = lu_h_fn
h_file = agi.open_file(ofn, start=False)
for header in headers:
h_file.add_header(header)
h_file.start()
if define_dict:
print_defines(h_file, define_dict)
array_names = _dump_lookup_low(agi, h_file, l1_lookup, name_pfx, lu_elem_type, all_zero_by_map)
h_file.close()
return array_names | def dump_lookup(agi, l1_lookup, name_pfx, lu_h_fn, headers, lu_elem_type, define_dict=None, all_zero_by_map=None, output_dir='include-private'):
"Dump the lookup tables - from opcode value to\n the L1 function pointers (in most cases they are L2 function pointers,\n which doesn't matter, because they have the same signature)\n @param l1_lookup: 2D dict so that\n l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)\n all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F\n map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set\n to some string indicating that L1 function is undefined.\n\n all_zero_by_map is an optional dict[map] -> {True,False}. If True\n skip emitting the map.\n \n return a dictionary of the array names generated. "
if output_dir:
ofn = mbuild.join(output_dir, lu_h_fn)
else:
ofn = lu_h_fn
h_file = agi.open_file(ofn, start=False)
for header in headers:
h_file.add_header(header)
h_file.start()
if define_dict:
print_defines(h_file, define_dict)
array_names = _dump_lookup_low(agi, h_file, l1_lookup, name_pfx, lu_elem_type, all_zero_by_map)
h_file.close()
return array_names<|docstring|>Dump the lookup tables - from opcode value to
the L1 function pointers (in most cases they are L2 function pointers,
which doesn't matter, because they have the same signature)
@param l1_lookup: 2D dict so that
l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)
all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F
map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set
to some string indicating that L1 function is undefined.
all_zero_by_map is an optional dict[map] -> {True,False}. If True
skip emitting the map.
return a dictionary of the array names generated.<|endoftext|> |
22051faca72edfd2eefdc8e562cbedf28858d35a634dc1ca85520720d0b517a2 | def _dump_lookup_low(agi, h_file, l1_lookup, name_pfx, lu_elem_type, all_zero_by_map=None):
"Dump the lookup tables - from opcode value to\n the L1 function pointers (in most cases they are L2 function pointers,\n which doesn't matter, because they have the same signature)\n @param l1_lookup: 2D dict so that\n l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)\n all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F\n map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set\n to some string indicating that L1 function is undefined.\n\n all_zero_by_map is an optional dict[map] -> {True,False}. If True\n skip emitting the map.\n \n return a dictionary of the array names generated. "
array_names = {}
for insn_map in sorted(l1_lookup.keys()):
arr_name = _get_map_lu_name(name_pfx, insn_map)
if ((all_zero_by_map == None) or (all_zero_by_map[insn_map] == False)):
ild_dump_map_array(l1_lookup[insn_map], arr_name, lu_elem_type, h_file)
array_names[insn_map] = arr_name
return array_names | Dump the lookup tables - from opcode value to
the L1 function pointers (in most cases they are L2 function pointers,
which doesn't matter, because they have the same signature)
@param l1_lookup: 2D dict so that
l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)
all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F
map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set
to some string indicating that L1 function is undefined.
all_zero_by_map is an optional dict[map] -> {True,False}. If True
skip emitting the map.
return a dictionary of the array names generated. | pysrc/ild_codegen.py | _dump_lookup_low | javiereguiluz/xed | 1,261 | python | def _dump_lookup_low(agi, h_file, l1_lookup, name_pfx, lu_elem_type, all_zero_by_map=None):
"Dump the lookup tables - from opcode value to\n the L1 function pointers (in most cases they are L2 function pointers,\n which doesn't matter, because they have the same signature)\n @param l1_lookup: 2D dict so that\n l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)\n all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F\n map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set\n to some string indicating that L1 function is undefined.\n\n all_zero_by_map is an optional dict[map] -> {True,False}. If True\n skip emitting the map.\n \n return a dictionary of the array names generated. "
array_names = {}
for insn_map in sorted(l1_lookup.keys()):
arr_name = _get_map_lu_name(name_pfx, insn_map)
if ((all_zero_by_map == None) or (all_zero_by_map[insn_map] == False)):
ild_dump_map_array(l1_lookup[insn_map], arr_name, lu_elem_type, h_file)
array_names[insn_map] = arr_name
return array_names | def _dump_lookup_low(agi, h_file, l1_lookup, name_pfx, lu_elem_type, all_zero_by_map=None):
"Dump the lookup tables - from opcode value to\n the L1 function pointers (in most cases they are L2 function pointers,\n which doesn't matter, because they have the same signature)\n @param l1_lookup: 2D dict so that\n l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)\n all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F\n map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set\n to some string indicating that L1 function is undefined.\n\n all_zero_by_map is an optional dict[map] -> {True,False}. If True\n skip emitting the map.\n \n return a dictionary of the array names generated. "
array_names = {}
for insn_map in sorted(l1_lookup.keys()):
arr_name = _get_map_lu_name(name_pfx, insn_map)
if ((all_zero_by_map == None) or (all_zero_by_map[insn_map] == False)):
ild_dump_map_array(l1_lookup[insn_map], arr_name, lu_elem_type, h_file)
array_names[insn_map] = arr_name
return array_names<|docstring|>Dump the lookup tables - from opcode value to
the L1 function pointers (in most cases they are L2 function pointers,
which doesn't matter, because they have the same signature)
@param l1_lookup: 2D dict so that
l1_lookup[string(insn_map)][string(opcode)] == string(L1_function_name)
all 0..255 opcode values should be set in the dict, so that if 0x0,0x0F
map-opcode is illegal, then l1_lookup['0x0']['0x0F'] should be set
to some string indicating that L1 function is undefined.
all_zero_by_map is an optional dict[map] -> {True,False}. If True
skip emitting the map.
return a dictionary of the array names generated.<|endoftext|> |
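A minimal standalone sketch of the idea the record above describes (one C array per instruction map, indexed by opcode, holding L1 function names); dump_map_array and xed_l1_undefined below are illustrative names, not XED's real identifiers:

def dump_map_array(opcode_to_fn, arr_name, elem_type):
    # One C array per map: index = opcode value, entry = L1 function name.
    lines = ['static %s %s[256] = {' % (elem_type, arr_name)]
    for opcode in range(256):
        # Illegal map-opcode slots fall back to an "undefined" marker, as the
        # docstring requires for entries like l1_lookup['0x0']['0x0F'].
        fn = opcode_to_fn.get('0x%02X' % opcode, 'xed_l1_undefined')
        lines.append('/*0x%02X*/ %s,' % (opcode, fn))
    lines.append('};')
    return '\n'.join(lines)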
e1642101710286bf3e869408c083e78fcfd776641a07214e46a2b14c9e352863 | def _gen_intervals_dict(fun_dict):
'If there are keys that map to the same value, we want to unite\n them to intervals in order to have fewer conditional branches in\n code. For example if fun_dict is something like: \n {0:f1, 1:f1, 2:f2, 3:f2 , ...} then we will generate dict\n {(0,1):f1, (2,3,4,5,6,7):f2} '
sorted_keys = sorted(fun_dict.keys())
cur_int = [sorted_keys[0]]
int_dict = {}
for key in sorted_keys[1:]:
if (fun_dict[key] == fun_dict[(key - 1)]):
cur_int.append(key)
else:
int_dict[tuple(cur_int)] = fun_dict[(key - 1)]
cur_int = [key]
int_dict[tuple(cur_int)] = fun_dict[sorted_keys[(- 1)]]
return int_dict | If there are keys that map to the same value, we want to unite
them to intervals in order to have fewer conditional branches in
code. For example if fun_dict is something like:
{0:f1, 1:f1, 2:f2, 3:f2 , ...} then we will generate dict
{(0,1):f1, (2,3,4,5,6,7):f2} | pysrc/ild_codegen.py | _gen_intervals_dict | javiereguiluz/xed | 1,261 | python | def _gen_intervals_dict(fun_dict):
'If there are keys that map to the same value, we want to unite\n them to intervals in order to have fewer conditional branches in\n code. For example if fun_dict is something like: \n {0:f1, 1:f1, 2:f2, 3:f2 , ...} then we will generate dict\n {(0,1):f1, (2,3,4,5,6,7):f2} '
sorted_keys = sorted(fun_dict.keys())
cur_int = [sorted_keys[0]]
int_dict = {}
for key in sorted_keys[1:]:
if (fun_dict[key] == fun_dict[(key - 1)]):
cur_int.append(key)
else:
int_dict[tuple(cur_int)] = fun_dict[(key - 1)]
cur_int = [key]
int_dict[tuple(cur_int)] = fun_dict[sorted_keys[(- 1)]]
return int_dict | def _gen_intervals_dict(fun_dict):
'If there are keys that map to the same value, we want to unite\n them to intervals in order to have fewer conditional branches in\n code. For example if fun_dict is something like: \n {0:f1, 1:f1, 2:f2, 3:f2 , ...} then we will generate dict\n {(0,1):f1, (2,3,4,5,6,7):f2} '
sorted_keys = sorted(fun_dict.keys())
cur_int = [sorted_keys[0]]
int_dict = {}
for key in sorted_keys[1:]:
if (fun_dict[key] == fun_dict[(key - 1)]):
cur_int.append(key)
else:
int_dict[tuple(cur_int)] = fun_dict[(key - 1)]
cur_int = [key]
int_dict[tuple(cur_int)] = fun_dict[sorted_keys[(- 1)]]
return int_dict<|docstring|>If there are keys that map to the same value, we want to unite
them to intervals in order to have fewer conditional branches in
code. For example if fun_dict is something like:
{0:f1, 1:f1, 2:f2, 3:f2 , ...} then we will generate dict
{(0,1):f1, (2,3,4,5,6,7):f2}<|endoftext|> |
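An equivalent one-pass sketch of the interval merging using itertools.groupby, shown only to illustrate the idea (it assumes the dense, consecutive key range the original also relies on):

from itertools import groupby

def intervals(fun_dict):
    # Consecutive sorted keys that share a value collapse into one tuple key.
    out = {}
    for value, group in groupby(sorted(fun_dict), key=fun_dict.get):
        out[tuple(group)] = value
    return out

print(intervals({0: 'f1', 1: 'f1', 2: 'f2', 3: 'f2'}))  # {(0, 1): 'f1', (2, 3): 'f2'}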
306d593e1211b479a3cee3b7d9206fca36a620fd2cfbfff5d9531638efc8f55d | def _is_special_op(opname):
'\n Some operands are "special" - like RM: Sometimes we don\'t have modrm,\n but grammar still likes to use RM operand - in this case it is first\n 3 bits of the opcode.\n In this case we can\'t just use regular RM operand scanned with ILD -\n we must check if MODRM exists and if not take 3 LSB bits from opcode.\n This is what getter should do for RM, that\'s why RM is special.\n REG is probably the same.\n is_special_op(opname) checks if the operand has special getter.\n '
return (opname in _special_ops_dict) | Some operands are "special" - like RM: Sometimes we don't have modrm,
but grammar still likes to use RM operand - in this case it is first
3 bits of the opcode.
In this case we can't just use regular RM operand scanned with ILD -
we must check if MODRM exists and if not take 3 LSB bits from opcode.
This is what getter should do for RM, that's why RM is special.
REG is probably the same.
is_special_op(opname) checks if the operand has special getter. | pysrc/ild_codegen.py | _is_special_op | javiereguiluz/xed | 1,261 | python | def _is_special_op(opname):
'\n Some operands are "special" - like RM: Sometimes we don\'t have modrm,\n but grammar still likes to use RM operand - in this case it is first\n 3 bits of the opcode.\n In this case we can\'t just use regular RM operand scanned with ILD -\n we must check if MODRM exists and if not take 3 LSB bits from opcode.\n This is what getter should do for RM, that\'s why RM is special.\n REG is probably the same.\n is_special_op(opname) checks if the operand has special getter.\n '
return (opname in _special_ops_dict) | def _is_special_op(opname):
'\n Some operands are "special" - like RM: Sometimes we don\'t have modrm,\n but grammar still likes to use RM operand - in this case it is first\n 3 bits of the opcode.\n In this case we can\'t just use regular RM operand scanned with ILD -\n we must check if MODRM exists and if not take 3 LSB bits from opcode.\n This is what getter should do for RM, that\'s why RM is special.\n REG is probably the same.\n is_special_op(opname) checks if the operand has special getter.\n '
return (opname in _special_ops_dict)<|docstring|>Some operands are "special" - like RM: Sometimes we don't have modrm,
but grammar still likes to use RM operand - in this case it is first
3 bits of the opcode.
In this case we can't just use regular RM operand scanned with ILD -
we must check if MODRM exists and if not take 3 LSB bits from opcode.
This is what getter should do for RM, that's why RM is special.
REG is probably the same.
is_special_op(opname) checks if the operand has special getter.<|endoftext|> |
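A self-contained sketch of the special-case behaviour the docstring describes for RM: use modrm[2:0] when a modrm byte is present, otherwise the 3 low bits of the opcode (the function below is illustrative, not XED source):

def get_rm(has_modrm, modrm_byte, opcode_byte):
    if has_modrm:
        return modrm_byte & 0b111   # the modrm.rm field
    return opcode_byte & 0b111      # 3 LSB of the opcode encode the register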
b18a795e51f083b14c17ca6050c096a11221b35c53fe25a1da32a53c1fce2c6c | def _get_special_op_getter_fn(opname):
"\n Returns special operand's getter name.\n See is_special_op comment.\n "
return _special_ops_dict[opname] | Returns special operand's getter name.
See is_special_op comment. | pysrc/ild_codegen.py | _get_special_op_getter_fn | javiereguiluz/xed | 1,261 | python | def _get_special_op_getter_fn(opname):
"\n Returns special operand's getter name.\n See is_special_op comment.\n "
return _special_ops_dict[opname] | def _get_special_op_getter_fn(opname):
"\n Returns special operand's getter name.\n See is_special_op comment.\n "
return _special_ops_dict[opname]<|docstring|>Returns special operand's getter name.
See is_special_op comment.<|endoftext|> |
b2e8bd6152f75864fcc08a5e85c969fa38fe6519a31cad6826eb7792a9a77ea2 | def emit_ild_access_call(opname, data_name, eoasz_set=False):
"\n @param opname: the name of the operand of xed grammar.\n @type opname: string\n\n @param data_name: the name of xed_decoded_inst_t* pointer\n @type data_name: string\n\n @param eoasz_set: when doing static decoding EOSZ and EASZ are not\n yet set correctly in the operands structure and we have to use\n special ILD getters to get their correct value.\n After dynamic decoding (and before we do operands decoding) EOSZ\n and EASZ are already set and we can use regular getter for them.\n @type eoasz_set: boolean\n\n IMPORTANT: EASZ and EOSZ cannot be computed with this function,\n see how it's done in ild_imm and ild_disp for these two.\n\n @return: C statement (no semicolon, no eol) that returns the\n value of corresponding operand.\n "
if ((opname in ['EASZ', 'EOSZ']) and (not eoasz_set)):
ildutil.ild_err(('No simple getter for %s operand' % opname))
elif _is_special_op(opname):
getter_fn = _get_special_op_getter_fn(opname)
else:
getter_fn = operand_storage.get_op_getter_fn(opname)
call_str = ('%s(%s)' % (getter_fn, data_name))
return call_str | @param opname: the name of the operand of xed grammar.
@type opname: string
@param data_name: the name of xed_decoded_inst_t* pointer
@type data_name: string
@param eoasz_set: when doing static decoding EOSZ and EASZ are not
yet set correctly in the operands structure and we have to use
special ILD getters to get their correct value.
After dynamic decoding (and before we do operands decoding) EOSZ
and EASZ are already set and we can use regular getter for them.
@type eoasz_set: boolean
IMPORTANT: EASZ and EOSZ cannot be computed with this function,
see how it's done in ild_imm and ild_disp for these two.
@return: C statement (no semicolon, no eol) that returns the
value of corresponding operand. | pysrc/ild_codegen.py | emit_ild_access_call | javiereguiluz/xed | 1,261 | python | def emit_ild_access_call(opname, data_name, eoasz_set=False):
"\n @param opname: the name of the operand of xed grammar.\n @type opname: string\n\n @param data_name: the name of xed_decoded_inst_t* pointer\n @type data_name: string\n\n @param eoasz_set: when doing static decoding EOSZ and EASZ are not\n yet set correctly in the operands structure and we have to use\n special ILD getters to get their correct value.\n After dynamic decoding (and before we do operands decoding) EOSZ\n and EASZ are already set and we can use regular getter for them.\n @type eoasz_set: boolean\n\n IMPORTANT: EASZ and EOSZ cannot be computed with this function,\n see how it's done in ild_imm and ild_disp for these two.\n\n @return: C statement (no semicolon, no eol) that returns the\n value of corresponding operand.\n "
if ((opname in ['EASZ', 'EOSZ']) and (not eoasz_set)):
ildutil.ild_err(('No simple getter for %s operand' % opname))
elif _is_special_op(opname):
getter_fn = _get_special_op_getter_fn(opname)
else:
getter_fn = operand_storage.get_op_getter_fn(opname)
call_str = ('%s(%s)' % (getter_fn, data_name))
return call_str | def emit_ild_access_call(opname, data_name, eoasz_set=False):
"\n @param opname: the name of the operand of xed grammar.\n @type opname: string\n\n @param data_name: the name of xed_decoded_inst_t* pointer\n @type data_name: string\n\n @param eoasz_set: when doing static decoding EOSZ and EASZ are not\n yet set correctly in the operands structure and we have to use\n special ILD getters to get their correct value.\n After dynamic decoding (and before we do operands decoding) EOSZ\n and EASZ are already set and we can use regular getter for them.\n @type eoasz_set: boolean\n\n IMPORTANT: EASZ and EOSZ cannot be computed with this function,\n see how it's done in ild_imm and ild_disp for these two.\n\n @return: C statement (no semicolon, no eol) that returns the\n value of corresponding operand.\n "
if ((opname in ['EASZ', 'EOSZ']) and (not eoasz_set)):
ildutil.ild_err(('No simple getter for %s operand' % opname))
elif _is_special_op(opname):
getter_fn = _get_special_op_getter_fn(opname)
else:
getter_fn = operand_storage.get_op_getter_fn(opname)
call_str = ('%s(%s)' % (getter_fn, data_name))
return call_str<|docstring|>@param opname: the name of the operand of xed grammar.
@type opname: string
@param data_name: the name of xed_decoded_inst_t* pointer
@type data_name: string
@param eoasz_set: when doing static decoding EOSZ and EASZ are not
yet set correctly in the operands structure and we have to use
special ILD getters to get their correct value.
After dynamic decoding (and before we do operands decoding) EOSZ
and EASZ are already set and we can use regular getter for them.
@type eoasz_set: boolean
IMPORTANT: EASZ and EOSZ cannot be computed with this function,
see how it's done in ild_imm and ild_disp for these two.
@return: C statement (no semicolon, no eol) that returns the
value of corresponding operand.<|endoftext|> |
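A standalone sketch of the getter dispatch implemented above; the entries in _special and the xed3_operand_get_* naming pattern are stand-ins for XED's generated accessors, not a verified API:

_special = {'RM': 'xed_ild_get_rm', 'REG': 'xed_ild_get_reg'}  # hypothetical names

def access_call(opname, data_name):
    # Special operands use dedicated ILD getters; all others use the regular
    # generated operand accessor.
    getter = _special.get(opname, 'xed3_operand_get_%s' % opname.lower())
    return '%s(%s)' % (getter, data_name)

print(access_call('RM', 'd'))   # xed_ild_get_rm(d)
print(access_call('MOD', 'd'))  # xed3_operand_get_mod(d)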
788d54e2bb725dc4676db1f11c825e9510ba8b093bf372f5876a6a810d5fc584 | def test_init_optimiser():
'Test init method of Optimiser class.'
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
assert (not optimiser.scoring)
assert (optimiser.n_folds == 2)
assert (optimiser.random_state == 1)
assert (optimiser.to_path == 'save')
assert optimiser.verbose | Test init method of Optimiser class. | tests/test_optimiser.py | test_init_optimiser | manugarri/MLBox | 1,382 | python | def test_init_optimiser():
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
assert (not optimiser.scoring)
assert (optimiser.n_folds == 2)
assert (optimiser.random_state == 1)
assert (optimiser.to_path == 'save')
assert optimiser.verbose | def test_init_optimiser():
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
assert (not optimiser.scoring)
assert (optimiser.n_folds == 2)
assert (optimiser.random_state == 1)
assert (optimiser.to_path == 'save')
assert optimiser.verbose<|docstring|>Test init method of Optimiser class.<|endoftext|> |
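A usage sketch matching the defaults this test asserts, assuming MLBox's documented import path; constructing Optimiser with no scoring emits the UserWarning the test captures:

from mlbox.optimisation import Optimiser

opt = Optimiser()        # warns: no scoring function given
print(opt.get_params())  # {'scoring': None, 'n_folds': 2, 'random_state': 1, 'to_path': 'save', 'verbose': True}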
a3dc28e053391a336527031c768a4791dec350a70c84336bf8c88ea03767638f | def test_get_params_optimiser():
'Test get_params method of Optimiser class.'
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
dict = {'scoring': None, 'n_folds': 2, 'random_state': 1, 'to_path': 'save', 'verbose': True}
assert (optimiser.get_params() == dict) | Test get_params method of Optimiser class. | tests/test_optimiser.py | test_get_params_optimiser | manugarri/MLBox | 1,382 | python | def test_get_params_optimiser():
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
dict = {'scoring': None, 'n_folds': 2, 'random_state': 1, 'to_path': 'save', 'verbose': True}
assert (optimiser.get_params() == dict) | def test_get_params_optimiser():
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
dict = {'scoring': None, 'n_folds': 2, 'random_state': 1, 'to_path': 'save', 'verbose': True}
assert (optimiser.get_params() == dict)<|docstring|>Test get_params method of Optimiser class.<|endoftext|>
375c61fd643ef1b9959fd79f27d5141a1c8eed94ae19f5ffab1f0d5bde33fe11 | def test_set_params_optimiser():
'Test set_params method of Optimiser class.'
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
optimiser.set_params(scoring='accuracy')
assert (optimiser.scoring == 'accuracy')
optimiser.set_params(n_folds=3)
assert (optimiser.n_folds == 3)
optimiser.set_params(random_state=2)
assert (optimiser.random_state == 2)
optimiser.set_params(to_path='name')
assert (optimiser.to_path == 'name')
optimiser.set_params(verbose=False)
assert (not optimiser.verbose)
with pytest.warns(UserWarning) as record:
optimiser.set_params(wrong_key=3)
assert (len(record) == 1) | Test set_params method of Optimiser class. | tests/test_optimiser.py | test_set_params_optimiser | manugarri/MLBox | 1,382 | python | def test_set_params_optimiser():
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
optimiser.set_params(scoring='accuracy')
assert (optimiser.scoring == 'accuracy')
optimiser.set_params(n_folds=3)
assert (optimiser.n_folds == 3)
optimiser.set_params(random_state=2)
assert (optimiser.random_state == 2)
optimiser.set_params(to_path='name')
assert (optimiser.to_path == 'name')
optimiser.set_params(verbose=False)
assert (not optimiser.verbose)
with pytest.warns(UserWarning) as record:
optimiser.set_params(wrong_key=3)
assert (len(record) == 1) | def test_set_params_optimiser():
with pytest.warns(UserWarning) as record:
optimiser = Optimiser()
assert (len(record) == 1)
optimiser.set_params(scoring='accuracy')
assert (optimiser.scoring == 'accuracy')
optimiser.set_params(n_folds=3)
assert (optimiser.n_folds == 3)
optimiser.set_params(random_state=2)
assert (optimiser.random_state == 2)
optimiser.set_params(to_path='name')
assert (optimiser.to_path == 'name')
optimiser.set_params(verbose=False)
assert (not optimiser.verbose)
with pytest.warns(UserWarning) as record:
optimiser.set_params(wrong_key=3)
assert (len(record) == 1)<|docstring|>Test set_params method of Optimiser class.<|endoftext|> |
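A generic sketch of the set_params contract this test exercises (known attributes are updated, unknown keys produce a warning instead of an error); it mirrors the tested behaviour, not MLBox's exact code:

import warnings

class Configurable:
    def __init__(self):
        self.scoring, self.n_folds = None, 2

    def set_params(self, **params):
        for key, value in params.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                warnings.warn('parameter %r is unknown and was ignored' % key)
        return self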
2406fd396ed59f9a510a5135dc8933816bb9c24969fa409ffcbe4a67aa94a3af | def test_evaluate_classification_optimiser():
'Test evaluate method of Optimiser class for classification.'
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train.csv', 'data_for_tests/test.csv'], target_name='Survived')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=None, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='roc_auc', n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert (0.0 <= score <= 1.0)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='wrong_scoring', n_folds=3)
assert (len(record) == 1)
with pytest.warns(UserWarning) as record:
score = opt.evaluate(None, dict)
assert (opt.scoring == 'neg_log_loss') | Test evaluate method of Optimiser class for classification. | tests/test_optimiser.py | test_evaluate_classification_optimiser | manugarri/MLBox | 1,382 | python | def test_evaluate_classification_optimiser():
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train.csv', 'data_for_tests/test.csv'], target_name='Survived')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=None, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='roc_auc', n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert (0.0 <= score <= 1.0)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='wrong_scoring', n_folds=3)
assert (len(record) == 1)
with pytest.warns(UserWarning) as record:
score = opt.evaluate(None, dict)
assert (opt.scoring == 'neg_log_loss') | def test_evaluate_classification_optimiser():
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train.csv', 'data_for_tests/test.csv'], target_name='Survived')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=None, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='roc_auc', n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert (0.0 <= score <= 1.0)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='wrong_scoring', n_folds=3)
assert (len(record) == 1)
with pytest.warns(UserWarning) as record:
score = opt.evaluate(None, dict)
assert (opt.scoring == 'neg_log_loss')<|docstring|>Test evaluate method of Optimiser class for classification.<|endoftext|>
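A minimal sketch (not MLBox's actual implementation) of the scorer fallback the last assertions exercise, where an unknown scoring string degrades to 'neg_log_loss' with a warning instead of raising:

import warnings
from sklearn.metrics import get_scorer

def resolve_scorer(scoring, default='neg_log_loss'):
    try:
        return get_scorer(scoring)  # raises ValueError on unknown names
    except ValueError:
        warnings.warn('unknown scoring %r, falling back to %r' % (scoring, default))
        return get_scorer(default)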
e3582b5bf1b0b37cf53a9e8fd4698a51785e6edeeee18cc9060bc5408fa951a0 | def test_evaluate_regression_optimiser():
'Test evaluate method of Optimiser class for regression.'
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train_regression.csv', 'data_for_tests/test_regression.csv'], target_name='SalePrice')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
mape = make_scorer((lambda y_true, y_pred: ((100 * np.sum((np.abs((y_true - y_pred)) / y_true))) / len(y_true))), greater_is_better=False, needs_proba=False)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=mape, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=None, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='wrong_scoring', n_folds=3)
assert (len(record) == 1)
with pytest.warns(UserWarning) as record:
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score) | Test evaluate method of Optimiser class for regression. | tests/test_optimiser.py | test_evaluate_regression_optimiser | manugarri/MLBox | 1,382 | python | def test_evaluate_regression_optimiser():
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train_regression.csv', 'data_for_tests/test_regression.csv'], target_name='SalePrice')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
mape = make_scorer((lambda y_true, y_pred: ((100 * np.sum((np.abs((y_true - y_pred)) / y_true))) / len(y_true))), greater_is_better=False, needs_proba=False)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=mape, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=None, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='wrong_scoring', n_folds=3)
assert (len(record) == 1)
with pytest.warns(UserWarning) as record:
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score) | def test_evaluate_regression_optimiser():
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train_regression.csv', 'data_for_tests/test_regression.csv'], target_name='SalePrice')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
mape = make_scorer((lambda y_true, y_pred: ((100 * np.sum((np.abs((y_true - y_pred)) / y_true))) / len(y_true))), greater_is_better=False, needs_proba=False)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=mape, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring=None, n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='wrong_scoring', n_folds=3)
assert (len(record) == 1)
with pytest.warns(UserWarning) as record:
score = opt.evaluate(None, dict)
assert ((- np.Inf) <= score)<|docstring|>Test evaluate method of Optimiser class for regression.<|endoftext|> |
f20dc53e0fa183f2832d84701c35a0b5ff18e6f82a8e6242df662162547e0bd4 | def test_evaluate_and_optimise_classification():
'Test evaluate_and_optimise method of Optimiser class.'
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train.csv', 'data_for_tests/test.csv'], target_name='Survived')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='accuracy', n_folds=3)
assert (len(record) == 1)
dict_error = dict.copy()
dict_error['target'] = dict_error['target'].astype(str)
with pytest.raises(ValueError):
score = opt.evaluate(None, dict_error)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='accuracy', n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert (0.0 <= score <= 1.0)
space = {'ne__numerical_strategy': {'search': 'choice', 'space': [0]}, 'ce__strategy': {'search': 'choice', 'space': ['label_encoding']}, 'fs__threshold': {'search': 'uniform', 'space': [0.01, 0.3]}, 'est__max_depth': {'search': 'choice', 'space': [3, 4, 5, 6, 7]}}
best = opt.optimise(space, dict, 1)
assert (type(best) == type(dict)) | Test evaluate_and_optimise method of Optimiser class. | tests/test_optimiser.py | test_evaluate_and_optimise_classification | manugarri/MLBox | 1,382 | python | def test_evaluate_and_optimise_classification():
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train.csv', 'data_for_tests/test.csv'], target_name='Survived')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='accuracy', n_folds=3)
assert (len(record) == 1)
dict_error = dict.copy()
dict_error['target'] = dict_error['target'].astype(str)
with pytest.raises(ValueError):
score = opt.evaluate(None, dict_error)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='accuracy', n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert (0.0 <= score <= 1.0)
space = {'ne__numerical_strategy': {'search': 'choice', 'space': [0]}, 'ce__strategy': {'search': 'choice', 'space': ['label_encoding']}, 'fs__threshold': {'search': 'uniform', 'space': [0.01, 0.3]}, 'est__max_depth': {'search': 'choice', 'space': [3, 4, 5, 6, 7]}}
best = opt.optimise(space, dict, 1)
assert (type(best) == type(dict)) | def test_evaluate_and_optimise_classification():
reader = Reader(sep=',')
dict = reader.train_test_split(Lpath=['data_for_tests/train.csv', 'data_for_tests/test.csv'], target_name='Survived')
drift_thresholder = Drift_thresholder()
drift_thresholder = drift_thresholder.fit_transform(dict)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='accuracy', n_folds=3)
assert (len(record) == 1)
dict_error = dict.copy()
dict_error['target'] = dict_error['target'].astype(str)
with pytest.raises(ValueError):
score = opt.evaluate(None, dict_error)
with pytest.warns(UserWarning) as record:
opt = Optimiser(scoring='accuracy', n_folds=3)
assert (len(record) == 1)
score = opt.evaluate(None, dict)
assert (0.0 <= score <= 1.0)
space = {'ne__numerical_strategy': {'search': 'choice', 'space': [0]}, 'ce__strategy': {'search': 'choice', 'space': ['label_encoding']}, 'fs__threshold': {'search': 'uniform', 'space': [0.01, 0.3]}, 'est__max_depth': {'search': 'choice', 'space': [3, 4, 5, 6, 7]}}
best = opt.optimise(space, dict, 1)
assert (type(best) == type(dict))<|docstring|>Test evaluate_and_optimise method of Optimiser class.<|endoftext|> |
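A hypothetical sketch of drawing one candidate from a space dict shaped like the one passed to optimise above ('choice' picks from a list, 'uniform' samples a float range); MLBox itself delegates this search to hyperopt:

import random

def sample_params(space):
    params = {}
    for name, spec in space.items():
        if spec['search'] == 'choice':
            params[name] = random.choice(spec['space'])
        elif spec['search'] == 'uniform':
            low, high = spec['space']
            params[name] = random.uniform(low, high)
    return params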
8a680b662a8c410411557f926c70968d80ed006b7000d43ce8f90f8c7ceb11f3 | def solve(self, func):
'Uses the Riemann sum approximation to compute the integral of func.\n\n Args:\n func ([function]): [Possibly vectorised function to integrate over]\n\n Returns:\n [torch.Tensor]: [Integral of func]\n '
return (self.volume * torch.mean(func(self.mesh))) | Uses the Riemann sum approximation to compute the integral of func.
Args:
func ([function]): [Possibly vectorised function to integrate over]
Returns:
[torch.Tensor]: [Integral of func] | src/gaussed/solver/integral_transform/riemann_sum.py | solve | MatthewAlexanderFisher/GaussED | 2 | python | def solve(self, func):
'Uses the Riemann sum approximation to compute the integral of func.\n\n Args:\n func ([function]): [Possibly vectorised function to integrate over]\n\n Returns:\n [torch.Tensor]: [Integral of func]\n '
return (self.volume * torch.mean(func(self.mesh))) | def solve(self, func):
'Uses the Riemann sum approximation to compute the integral of func.\n\n Args:\n func ([function]): [Possibly vectorised function to integrate over]\n\n Returns:\n [torch.Tensor]: [Integral of func]\n '
return (self.volume * torch.mean(func(self.mesh)))<|docstring|>Uses the Riemann sum approximation to compute the integral of func.
Args:
func ([function]): [Possibly vectorised function to integrate over]
Returns:
[torch.Tensor]: [Integral of func]<|endoftext|> |
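A self-contained check of the Riemann-sum identity solve() relies on (domain volume times the mesh mean of f approximates the integral), assuming a mesh of shape (N, 1) on [0, 1], where the true value of the integral of x**2 is 1/3:

import torch

mesh = torch.linspace(0.0, 1.0, 10000).unsqueeze(-1)  # assumed (N, 1) mesh layout
volume = 1.0
approx = volume * torch.mean(mesh.squeeze() ** 2)
print(float(approx))  # ~0.3333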
d6f55452833eb6011a63cbdaa767a614a937f0cb8da5a9c74b38b3d9b21faf6e | def line_integral_basis(self, func, m):
'Uses the Riemann sum approximation to compute the integral of func over a flattened mesh.\n\n Args:\n func ([function]): [Function to integrate over]\n m ([int, list]): [Number of basis functions]\n\n Returns:\n [torch.Tensor]: [Integral of func]\n '
return (self.volume * torch.mean(func(self.mesh, m).T, dim=1)) | Uses the Riemann sum approximation to compute the integral of func over a flattened mesh.
Args:
func ([function]): [Function to integrate over]
m ([int, list]): [Number of basis functions]
Returns:
[torch.Tensor]: [Integral of func] | src/gaussed/solver/integral_transform/riemann_sum.py | line_integral_basis | MatthewAlexanderFisher/GaussED | 2 | python | def line_integral_basis(self, func, m):
'Uses the Riemann sum approximation to compute the integral of func over a flattened mesh.\n\n Args:\n func ([function]): [Function to integrate over]\n m ([int, list]): [Number of basis functions]\n\n Returns:\n [torch.Tensor]: [Integral of func]\n '
return (self.volume * torch.mean(func(self.mesh, m).T, dim=1)) | def line_integral_basis(self, func, m):
'Uses the Riemann sum approximation to compute the integral of func over a flattened mesh.\n\n Args:\n func ([function]): [Function to integrate over]\n m ([int, list]): [Number of basis functions]\n\n Returns:\n [torch.Tensor]: [Integral of func]\n '
return (self.volume * torch.mean(func(self.mesh, m).T, dim=1))<|docstring|>Uses the Riemann sum approximation to compute the integral of func over a flattened mesh.
Args:
func ([function]): [Function to integrate over]
m ([int, list]): [Number of basis functions]
Returns:
[torch.Tensor]: [Integral of func]<|endoftext|> |
4c3a6d403515efc09e355083d221b914a9a63cdfd83c21e76ce0679f7bfff326 | def print_reversed_list_integer(my_list=[]):
'\n prints the reverse of a given list\n '
if (type(my_list) is list):
new_l = my_list[0:]
new_l.reverse()
for i in range(len(new_l)):
print('{:d}'.format(new_l[i])) | prints the reverse of a given list | 0x03-python-data_structures/3-print_reversed_list_integer.py | print_reversed_list_integer | BennettDixon/holbertonschool-higher_level_programming | 1 | python | def print_reversed_list_integer(my_list=[]):
'\n \n '
if (type(my_list) is list):
new_l = my_list[0:]
new_l.reverse()
for i in range(len(new_l)):
print('{:d}'.format(new_l[i])) | def print_reversed_list_integer(my_list=[]):
'\n \n '
if (type(my_list) is list):
new_l = my_list[0:]
new_l.reverse()
for i in range(len(new_l)):
print('{:d}'.format(new_l[i]))<|docstring|>prints the reverse of a given list<|endoftext|> |
96872c7b0e422a50ecd4cbc8cb5aae714749676ef5f5e809c59a56434cb0590e | def filter(self, npc):
"\n Get NPC's shop.\n\n Args:\n npc: (string) NPC's key.\n "
return self.objects.filter(npc=npc) | Get NPC's shop.
Args:
npc: (string) NPC's key. | muddery/worlddata/dao/npc_shops_mapper.py | filter | noahzaozao/muddery | 0 | python | def filter(self, npc):
"\n Get NPC's shop.\n\n Args:\n npc: (string) NPC's key.\n "
return self.objects.filter(npc=npc) | def filter(self, npc):
"\n Get NPC's shop.\n\n Args:\n npc: (string) NPC's key.\n "
return self.objects.filter(npc=npc)<|docstring|>Get NPC's shop.
Args:
npc: (string) NPC's key.<|endoftext|> |
d72120a96352246e60d35bc146952d0f2e6850e821bec9a39ff624552e9762e0 | def mean(v):
'\n Return the mean of the elements of `v`.\n\n We define the mean of the empty list to be the (symbolic) NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.mean`` or ``numpy.nanmean``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: mean([pi, e])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead\n See https://trac.sagemath.org/29662 for details.\n 1/2*pi + 1/2*e\n sage: mean([])\n NaN\n sage: mean([I, sqrt(2), 3/5])\n 1/3*sqrt(2) + 1/3*I + 1/5\n sage: mean([RIF(1.0103,1.0103), RIF(2)])\n 1.5051500000000000?\n sage: mean(range(4))\n 3/2\n sage: v = stats.TimeSeries([1..100])\n sage: mean(v)\n 50.5\n '
deprecation(29662, 'sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead')
if hasattr(v, 'mean'):
return v.mean()
if (not v):
return NaN
s = sum(v)
if isinstance(s, int):
return (s / ZZ(len(v)))
return (s / len(v)) | Return the mean of the elements of `v`.
We define the mean of the empty list to be the (symbolic) NaN,
following the convention of MATLAB, Scipy, and R.
This function is deprecated. Use ``numpy.mean`` or ``numpy.nanmean``
instead.
INPUT:
- `v` -- a list of numbers
OUTPUT:
- a number
EXAMPLES::
sage: mean([pi, e])
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead
See https://trac.sagemath.org/29662 for details.
1/2*pi + 1/2*e
sage: mean([])
NaN
sage: mean([I, sqrt(2), 3/5])
1/3*sqrt(2) + 1/3*I + 1/5
sage: mean([RIF(1.0103,1.0103), RIF(2)])
1.5051500000000000?
sage: mean(range(4))
3/2
sage: v = stats.TimeSeries([1..100])
sage: mean(v)
50.5 | src/sage/stats/basic_stats.py | mean | LaisRast/sage | 1,742 | python | def mean(v):
'\n Return the mean of the elements of `v`.\n\n We define the mean of the empty list to be the (symbolic) NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.mean`` or ``numpy.nanmean``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: mean([pi, e])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead\n See https://trac.sagemath.org/29662 for details.\n 1/2*pi + 1/2*e\n sage: mean([])\n NaN\n sage: mean([I, sqrt(2), 3/5])\n 1/3*sqrt(2) + 1/3*I + 1/5\n sage: mean([RIF(1.0103,1.0103), RIF(2)])\n 1.5051500000000000?\n sage: mean(range(4))\n 3/2\n sage: v = stats.TimeSeries([1..100])\n sage: mean(v)\n 50.5\n '
deprecation(29662, 'sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead')
if hasattr(v, 'mean'):
return v.mean()
if (not v):
return NaN
s = sum(v)
if isinstance(s, int):
return (s / ZZ(len(v)))
return (s / len(v)) | def mean(v):
'\n Return the mean of the elements of `v`.\n\n We define the mean of the empty list to be the (symbolic) NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.mean`` or ``numpy.nanmean``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: mean([pi, e])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead\n See https://trac.sagemath.org/29662 for details.\n 1/2*pi + 1/2*e\n sage: mean([])\n NaN\n sage: mean([I, sqrt(2), 3/5])\n 1/3*sqrt(2) + 1/3*I + 1/5\n sage: mean([RIF(1.0103,1.0103), RIF(2)])\n 1.5051500000000000?\n sage: mean(range(4))\n 3/2\n sage: v = stats.TimeSeries([1..100])\n sage: mean(v)\n 50.5\n '
deprecation(29662, 'sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead')
if hasattr(v, 'mean'):
return v.mean()
if (not v):
return NaN
s = sum(v)
if isinstance(s, int):
return (s / ZZ(len(v)))
return (s / len(v))<|docstring|>Return the mean of the elements of `v`.
We define the mean of the empty list to be the (symbolic) NaN,
following the convention of MATLAB, Scipy, and R.
This function is deprecated. Use ``numpy.mean`` or ``numpy.nanmean``
instead.
INPUT:
- `v` -- a list of numbers
OUTPUT:
- a number
EXAMPLES::
sage: mean([pi, e])
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead
See https://trac.sagemath.org/29662 for details.
1/2*pi + 1/2*e
sage: mean([])
NaN
sage: mean([I, sqrt(2), 3/5])
1/3*sqrt(2) + 1/3*I + 1/5
sage: mean([RIF(1.0103,1.0103), RIF(2)])
1.5051500000000000?
sage: mean(range(4))
3/2
sage: v = stats.TimeSeries([1..100])
sage: mean(v)
50.5<|endoftext|> |
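For reference, the replacements the deprecation message points to behave as follows:

import numpy as np

print(np.mean([1, 2, 3, 4]))                # 2.5
print(np.nanmean([1.0, 2.0, np.nan, 4.0]))  # 2.333..., NaN entries are skipped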
962d4e6530a134a27b3f9ddf06365705e1ffaa433208794179b84bc7585b33e3 | def mode(v):
"\n Return the mode of `v`.\n\n The mode is the list of the most frequently occurring\n elements in `v`. If `n` is the most times that any element occurs\n in `v`, then the mode is the list of elements of `v` that\n occur `n` times. The list is sorted if possible.\n\n This function is deprecated. Use ``scipy.stats.mode`` or\n ``statistics.mode`` instead.\n\n .. NOTE::\n\n The elements of `v` must be hashable.\n\n INPUT:\n\n - `v` -- a list\n\n OUTPUT:\n\n - a list (sorted if possible)\n\n EXAMPLES::\n\n sage: v = [1,2,4,1,6,2,6,7,1]\n sage: mode(v)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead\n See https://trac.sagemath.org/29662 for details.\n [1]\n sage: v.count(1)\n 3\n sage: mode([])\n []\n\n sage: mode([1,2,3,4,5])\n [1, 2, 3, 4, 5]\n sage: mode([3,1,2,1,2,3])\n [1, 2, 3]\n sage: mode([0, 2, 7, 7, 13, 20, 2, 13])\n [2, 7, 13]\n\n sage: mode(['sage', 'four', 'I', 'three', 'sage', 'pi'])\n ['sage']\n\n sage: class MyClass:\n ....: def mode(self):\n ....: return [1]\n sage: stats.mode(MyClass())\n [1]\n "
deprecation(29662, 'sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead')
if hasattr(v, 'mode'):
return v.mode()
if (not v):
return v
freq = {}
for i in v:
if (i in freq):
freq[i] += 1
else:
freq[i] = 1
n = max(freq.values())
try:
return sorted((u for (u, f) in freq.items() if (f == n)))
except TypeError:
return [u for (u, f) in freq.items() if (f == n)] | Return the mode of `v`.
The mode is the list of the most frequently occurring
elements in `v`. If `n` is the most times that any element occurs
in `v`, then the mode is the list of elements of `v` that
occur `n` times. The list is sorted if possible.
This function is deprecated. Use ``scipy.stats.mode`` or
``statistics.mode`` instead.
.. NOTE::
The elements of `v` must be hashable.
INPUT:
- `v` -- a list
OUTPUT:
- a list (sorted if possible)
EXAMPLES::
sage: v = [1,2,4,1,6,2,6,7,1]
sage: mode(v)
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead
See https://trac.sagemath.org/29662 for details.
[1]
sage: v.count(1)
3
sage: mode([])
[]
sage: mode([1,2,3,4,5])
[1, 2, 3, 4, 5]
sage: mode([3,1,2,1,2,3])
[1, 2, 3]
sage: mode([0, 2, 7, 7, 13, 20, 2, 13])
[2, 7, 13]
sage: mode(['sage', 'four', 'I', 'three', 'sage', 'pi'])
['sage']
sage: class MyClass:
....: def mode(self):
....: return [1]
sage: stats.mode(MyClass())
[1] | src/sage/stats/basic_stats.py | mode | LaisRast/sage | 1,742 | python | def mode(v):
"\n Return the mode of `v`.\n\n The mode is the list of the most frequently occurring\n elements in `v`. If `n` is the most times that any element occurs\n in `v`, then the mode is the list of elements of `v` that\n occur `n` times. The list is sorted if possible.\n\n This function is deprecated. Use ``scipy.stats.mode`` or\n ``statistics.mode`` instead.\n\n .. NOTE::\n\n The elements of `v` must be hashable.\n\n INPUT:\n\n - `v` -- a list\n\n OUTPUT:\n\n - a list (sorted if possible)\n\n EXAMPLES::\n\n sage: v = [1,2,4,1,6,2,6,7,1]\n sage: mode(v)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead\n See https://trac.sagemath.org/29662 for details.\n [1]\n sage: v.count(1)\n 3\n sage: mode([])\n []\n\n sage: mode([1,2,3,4,5])\n [1, 2, 3, 4, 5]\n sage: mode([3,1,2,1,2,3])\n [1, 2, 3]\n sage: mode([0, 2, 7, 7, 13, 20, 2, 13])\n [2, 7, 13]\n\n sage: mode(['sage', 'four', 'I', 'three', 'sage', 'pi'])\n ['sage']\n\n sage: class MyClass:\n ....: def mode(self):\n ....: return [1]\n sage: stats.mode(MyClass())\n [1]\n "
deprecation(29662, 'sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead')
if hasattr(v, 'mode'):
return v.mode()
if (not v):
return v
freq = {}
for i in v:
if (i in freq):
freq[i] += 1
else:
freq[i] = 1
n = max(freq.values())
try:
return sorted((u for (u, f) in freq.items() if (f == n)))
except TypeError:
return [u for (u, f) in freq.items() if (f == n)] | def mode(v):
"\n Return the mode of `v`.\n\n The mode is the list of the most frequently occurring\n elements in `v`. If `n` is the most times that any element occurs\n in `v`, then the mode is the list of elements of `v` that\n occur `n` times. The list is sorted if possible.\n\n This function is deprecated. Use ``scipy.stats.mode`` or\n ``statistics.mode`` instead.\n\n .. NOTE::\n\n The elements of `v` must be hashable.\n\n INPUT:\n\n - `v` -- a list\n\n OUTPUT:\n\n - a list (sorted if possible)\n\n EXAMPLES::\n\n sage: v = [1,2,4,1,6,2,6,7,1]\n sage: mode(v)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead\n See https://trac.sagemath.org/29662 for details.\n [1]\n sage: v.count(1)\n 3\n sage: mode([])\n []\n\n sage: mode([1,2,3,4,5])\n [1, 2, 3, 4, 5]\n sage: mode([3,1,2,1,2,3])\n [1, 2, 3]\n sage: mode([0, 2, 7, 7, 13, 20, 2, 13])\n [2, 7, 13]\n\n sage: mode(['sage', 'four', 'I', 'three', 'sage', 'pi'])\n ['sage']\n\n sage: class MyClass:\n ....: def mode(self):\n ....: return [1]\n sage: stats.mode(MyClass())\n [1]\n "
deprecation(29662, 'sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead')
if hasattr(v, 'mode'):
return v.mode()
if (not v):
return v
freq = {}
for i in v:
if (i in freq):
freq[i] += 1
else:
freq[i] = 1
n = max(freq.values())
try:
return sorted((u for (u, f) in freq.items() if (f == n)))
except TypeError:
return [u for (u, f) in freq.items() if (f == n)]<|docstring|>Return the mode of `v`.
The mode is the list of the most frequently occurring
elements in `v`. If `n` is the most times that any element occurs
in `v`, then the mode is the list of elements of `v` that
occur `n` times. The list is sorted if possible.
This function is deprecated. Use ``scipy.stats.mode`` or
``statistics.mode`` instead.
.. NOTE::
The elements of `v` must be hashable.
INPUT:
- `v` -- a list
OUTPUT:
- a list (sorted if possible)
EXAMPLES::
sage: v = [1,2,4,1,6,2,6,7,1]
sage: mode(v)
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.mode is deprecated; use scipy.stats.mode or statistics.mode instead
See https://trac.sagemath.org/29662 for details.
[1]
sage: v.count(1)
3
sage: mode([])
[]
sage: mode([1,2,3,4,5])
[1, 2, 3, 4, 5]
sage: mode([3,1,2,1,2,3])
[1, 2, 3]
sage: mode([0, 2, 7, 7, 13, 20, 2, 13])
[2, 7, 13]
sage: mode(['sage', 'four', 'I', 'three', 'sage', 'pi'])
['sage']
sage: class MyClass:
....: def mode(self):
....: return [1]
sage: stats.mode(MyClass())
[1]<|endoftext|> |
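An equivalent of the frequency-counting loop above using collections.Counter, keeping the same tie handling and the same fallback for unsortable values:

from collections import Counter

def modes(v):
    counts = Counter(v)
    if not counts:
        return []
    n = max(counts.values())
    try:
        return sorted(u for u, f in counts.items() if f == n)
    except TypeError:  # mixed, unsortable element types
        return [u for u, f in counts.items() if f == n]

print(modes([3, 1, 2, 1, 2, 3]))  # [1, 2, 3]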
259a3fd73739799d66f927c992ed789004def61ecaf9e91f98280b789fe2a5fe | def std(v, bias=False):
'\n Return the standard deviation of the elements of `v`.\n\n We define the standard deviation of the empty list to be NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.std`` or ``numpy.nanstd``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n - ``bias`` -- bool (default: False); if False, divide by\n len(v) - 1 instead of len(v)\n to give a less biased estimator (sample) for the\n standard deviation.\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: std([1..6], bias=True)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead\n See https://trac.sagemath.org/29662 for details.\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead\n See https://trac.sagemath.org/29662 for details.\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead\n See https://trac.sagemath.org/29662 for details.\n 1/2*sqrt(35/3)\n sage: std([1..6], bias=False)\n sqrt(7/2)\n sage: std([e, pi])\n sqrt(1/2)*abs(pi - e)\n sage: std([])\n NaN\n sage: std([I, sqrt(2), 3/5])\n 1/15*sqrt(1/2)*sqrt((10*sqrt(2) - 5*I - 3)^2\n + (5*sqrt(2) - 10*I + 3)^2 + (5*sqrt(2) + 5*I - 6)^2)\n sage: std([RIF(1.0103, 1.0103), RIF(2)])\n 0.6998235813403261?\n sage: import numpy\n sage: x = numpy.array([1,2,3,4,5])\n sage: std(x, bias=False)\n 1.5811388300841898\n sage: x = stats.TimeSeries([1..100])\n sage: std(x)\n 29.011491975882016\n\n TESTS::\n\n sage: data = [random() for i in [1 .. 20]]\n sage: std(data) # random\n 0.29487771726609185\n '
deprecation(29662, 'sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead')
if hasattr(v, 'standard_deviation'):
return v.standard_deviation(bias=bias)
import numpy
if isinstance(v, numpy.ndarray):
if bias:
return v.std()
else:
return v.std(ddof=1)
if (not v):
return NaN
return sqrt(variance(v, bias=bias)) | Return the standard deviation of the elements of `v`.
We define the standard deviation of the empty list to be NaN,
following the convention of MATLAB, Scipy, and R.
This function is deprecated. Use ``numpy.std`` or ``numpy.nanstd``
instead.
INPUT:
- `v` -- a list of numbers
- ``bias`` -- bool (default: False); if False, divide by
len(v) - 1 instead of len(v)
to give a less biased estimator (sample) for the
standard deviation.
OUTPUT:
- a number
EXAMPLES::
sage: std([1..6], bias=True)
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead
See https://trac.sagemath.org/29662 for details.
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead
See https://trac.sagemath.org/29662 for details.
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead
See https://trac.sagemath.org/29662 for details.
1/2*sqrt(35/3)
sage: std([1..6], bias=False)
sqrt(7/2)
sage: std([e, pi])
sqrt(1/2)*abs(pi - e)
sage: std([])
NaN
sage: std([I, sqrt(2), 3/5])
1/15*sqrt(1/2)*sqrt((10*sqrt(2) - 5*I - 3)^2
+ (5*sqrt(2) - 10*I + 3)^2 + (5*sqrt(2) + 5*I - 6)^2)
sage: std([RIF(1.0103, 1.0103), RIF(2)])
0.6998235813403261?
sage: import numpy
sage: x = numpy.array([1,2,3,4,5])
sage: std(x, bias=False)
1.5811388300841898
sage: x = stats.TimeSeries([1..100])
sage: std(x)
29.011491975882016
TESTS::
sage: data = [random() for i in [1 .. 20]]
sage: std(data) # random
0.29487771726609185 | src/sage/stats/basic_stats.py | std | LaisRast/sage | 1,742 | python | def std(v, bias=False):
'\n Return the standard deviation of the elements of `v`.\n\n We define the standard deviation of the empty list to be NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.std`` or ``numpy.nanstd``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n - ``bias`` -- bool (default: False); if False, divide by\n len(v) - 1 instead of len(v)\n to give a less biased estimator (sample) for the\n standard deviation.\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: std([1..6], bias=True)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead\n See https://trac.sagemath.org/29662 for details.\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead\n See https://trac.sagemath.org/29662 for details.\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead\n See https://trac.sagemath.org/29662 for details.\n 1/2*sqrt(35/3)\n sage: std([1..6], bias=False)\n sqrt(7/2)\n sage: std([e, pi])\n sqrt(1/2)*abs(pi - e)\n sage: std([])\n NaN\n sage: std([I, sqrt(2), 3/5])\n 1/15*sqrt(1/2)*sqrt((10*sqrt(2) - 5*I - 3)^2\n + (5*sqrt(2) - 10*I + 3)^2 + (5*sqrt(2) + 5*I - 6)^2)\n sage: std([RIF(1.0103, 1.0103), RIF(2)])\n 0.6998235813403261?\n sage: import numpy\n sage: x = numpy.array([1,2,3,4,5])\n sage: std(x, bias=False)\n 1.5811388300841898\n sage: x = stats.TimeSeries([1..100])\n sage: std(x)\n 29.011491975882016\n\n TESTS::\n\n sage: data = [random() for i in [1 .. 20]]\n sage: std(data) # random\n 0.29487771726609185\n '
deprecation(29662, 'sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead')
if hasattr(v, 'standard_deviation'):
return v.standard_deviation(bias=bias)
import numpy
if isinstance(v, numpy.ndarray):
if bias:
return v.std()
else:
return v.std(ddof=1)
if (not v):
return NaN
return sqrt(variance(v, bias=bias)) | def std(v, bias=False):
'\n Return the standard deviation of the elements of `v`.\n\n We define the standard deviation of the empty list to be NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.std`` or ``numpy.nanstd``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n - ``bias`` -- bool (default: False); if False, divide by\n len(v) - 1 instead of len(v)\n to give a less biased estimator (sample) for the\n standard deviation.\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: std([1..6], bias=True)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead\n See https://trac.sagemath.org/29662 for details.\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead\n See https://trac.sagemath.org/29662 for details.\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead\n See https://trac.sagemath.org/29662 for details.\n 1/2*sqrt(35/3)\n sage: std([1..6], bias=False)\n sqrt(7/2)\n sage: std([e, pi])\n sqrt(1/2)*abs(pi - e)\n sage: std([])\n NaN\n sage: std([I, sqrt(2), 3/5])\n 1/15*sqrt(1/2)*sqrt((10*sqrt(2) - 5*I - 3)^2\n + (5*sqrt(2) - 10*I + 3)^2 + (5*sqrt(2) + 5*I - 6)^2)\n sage: std([RIF(1.0103, 1.0103), RIF(2)])\n 0.6998235813403261?\n sage: import numpy\n sage: x = numpy.array([1,2,3,4,5])\n sage: std(x, bias=False)\n 1.5811388300841898\n sage: x = stats.TimeSeries([1..100])\n sage: std(x)\n 29.011491975882016\n\n TESTS::\n\n sage: data = [random() for i in [1 .. 20]]\n sage: std(data) # random\n 0.29487771726609185\n '
deprecation(29662, 'sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead')
if hasattr(v, 'standard_deviation'):
return v.standard_deviation(bias=bias)
import numpy
if isinstance(v, numpy.ndarray):
if bias:
return v.std()
else:
return v.std(ddof=1)
if (not v):
return NaN
return sqrt(variance(v, bias=bias))<|docstring|>Return the standard deviation of the elements of `v`.
We define the standard deviation of the empty list to be NaN,
following the convention of MATLAB, Scipy, and R.
This function is deprecated. Use ``numpy.std`` or ``numpy.nanstd``
instead.
INPUT:
- `v` -- a list of numbers
- ``bias`` -- bool (default: False); if False, divide by
len(v) - 1 instead of len(v)
to give a less biased estimator (sample) for the
standard deviation.
OUTPUT:
- a number
EXAMPLES::
sage: std([1..6], bias=True)
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.std is deprecated; use numpy.std or numpy.nanstd instead
See https://trac.sagemath.org/29662 for details.
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead
See https://trac.sagemath.org/29662 for details.
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.mean is deprecated; use numpy.mean or numpy.nanmean instead
See https://trac.sagemath.org/29662 for details.
1/2*sqrt(35/3)
sage: std([1..6], bias=False)
sqrt(7/2)
sage: std([e, pi])
sqrt(1/2)*abs(pi - e)
sage: std([])
NaN
sage: std([I, sqrt(2), 3/5])
1/15*sqrt(1/2)*sqrt((10*sqrt(2) - 5*I - 3)^2
+ (5*sqrt(2) - 10*I + 3)^2 + (5*sqrt(2) + 5*I - 6)^2)
sage: std([RIF(1.0103, 1.0103), RIF(2)])
0.6998235813403261?
sage: import numpy
sage: x = numpy.array([1,2,3,4,5])
sage: std(x, bias=False)
1.5811388300841898
sage: x = stats.TimeSeries([1..100])
sage: std(x)
29.011491975882016
TESTS::
sage: data = [random() for i in [1 .. 20]]
sage: std(data) # random
0.29487771726609185<|endoftext|> |
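The ddof convention behind the bias flag, written with the numpy calls the deprecation recommends: ddof=0 divides by len(v) (the bias=True case), ddof=1 divides by len(v) - 1 (bias=False, matching the doctest value):

import numpy as np

x = np.array([1, 2, 3, 4, 5])
print(np.std(x))          # 1.4142... (ddof=0, biased)
print(np.std(x, ddof=1))  # 1.5811388300841898 (ddof=1, unbiased sample estimator)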
4f7fb6d143f6f5f05b5114033c094c384471b4510a9e8ba04f4013871d0bb7bd | def variance(v, bias=False):
'\n Return the variance of the elements of `v`.\n\n We define the variance of the empty list to be NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.var`` or ``numpy.nanvar``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n - ``bias`` -- bool (default: False); if False, divide by\n len(v) - 1 instead of len(v)\n to give a less biased estimator (sample) for the\n standard deviation.\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: variance([1..6])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead\n See https://trac.sagemath.org/29662 for details.\n 7/2\n sage: variance([1..6], bias=True)\n 35/12\n sage: variance([e, pi])\n 1/2*(pi - e)^2\n sage: variance([])\n NaN\n sage: variance([I, sqrt(2), 3/5])\n 1/450*(10*sqrt(2) - 5*I - 3)^2 + 1/450*(5*sqrt(2) - 10*I + 3)^2\n + 1/450*(5*sqrt(2) + 5*I - 6)^2\n sage: variance([RIF(1.0103, 1.0103), RIF(2)])\n 0.4897530450000000?\n sage: import numpy\n sage: x = numpy.array([1,2,3,4,5])\n sage: variance(x, bias=False)\n 2.5\n sage: x = stats.TimeSeries([1..100])\n sage: variance(x)\n 841.6666666666666\n sage: variance(x, bias=True)\n 833.25\n sage: class MyClass:\n ....: def variance(self, bias = False):\n ....: return 1\n sage: stats.variance(MyClass())\n 1\n sage: class SillyPythonList:\n ....: def __init__(self):\n ....: self.__list = [2, 4]\n ....: def __len__(self):\n ....: return len(self.__list)\n ....: def __iter__(self):\n ....: return self.__list.__iter__()\n ....: def mean(self):\n ....: return 3\n sage: R = SillyPythonList()\n sage: variance(R)\n 2\n sage: variance(R, bias=True)\n 1\n\n TESTS:\n\n The performance issue from :trac:`10019` is solved::\n\n sage: variance([1] * 2^18)\n 0\n '
deprecation(29662, 'sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead')
if hasattr(v, 'variance'):
return v.variance(bias=bias)
import numpy
x = 0
if isinstance(v, numpy.ndarray):
if bias:
return v.var()
else:
return v.var(ddof=1)
if (not v):
return NaN
mu = mean(v)
for vi in v:
x += ((vi - mu) ** 2)
if bias:
if isinstance(x, int):
return (x / ZZ(len(v)))
return (x / len(v))
else:
if isinstance(x, int):
return (x / ZZ((len(v) - 1)))
return (x / (len(v) - 1)) | Return the variance of the elements of `v`.
We define the variance of the empty list to be NaN,
following the convention of MATLAB, Scipy, and R.
This function is deprecated. Use ``numpy.var`` or ``numpy.nanvar``
instead.
INPUT:
- `v` -- a list of numbers
- ``bias`` -- bool (default: False); if False, divide by
len(v) - 1 instead of len(v)
to give a less biased estimator (sample) for the
standard deviation.
OUTPUT:
- a number
EXAMPLES::
sage: variance([1..6])
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead
See https://trac.sagemath.org/29662 for details.
7/2
sage: variance([1..6], bias=True)
35/12
sage: variance([e, pi])
1/2*(pi - e)^2
sage: variance([])
NaN
sage: variance([I, sqrt(2), 3/5])
1/450*(10*sqrt(2) - 5*I - 3)^2 + 1/450*(5*sqrt(2) - 10*I + 3)^2
+ 1/450*(5*sqrt(2) + 5*I - 6)^2
sage: variance([RIF(1.0103, 1.0103), RIF(2)])
0.4897530450000000?
sage: import numpy
sage: x = numpy.array([1,2,3,4,5])
sage: variance(x, bias=False)
2.5
sage: x = stats.TimeSeries([1..100])
sage: variance(x)
841.6666666666666
sage: variance(x, bias=True)
833.25
sage: class MyClass:
....: def variance(self, bias = False):
....: return 1
sage: stats.variance(MyClass())
1
sage: class SillyPythonList:
....: def __init__(self):
....: self.__list = [2, 4]
....: def __len__(self):
....: return len(self.__list)
....: def __iter__(self):
....: return self.__list.__iter__()
....: def mean(self):
....: return 3
sage: R = SillyPythonList()
sage: variance(R)
2
sage: variance(R, bias=True)
1
TESTS:
The performance issue from :trac:`10019` is solved::
sage: variance([1] * 2^18)
0 | src/sage/stats/basic_stats.py | variance | LaisRast/sage | 1,742 | python | def variance(v, bias=False):
'\n Return the variance of the elements of `v`.\n\n We define the variance of the empty list to be NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.var`` or ``numpy.nanvar``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n - ``bias`` -- bool (default: False); if False, divide by\n len(v) - 1 instead of len(v)\n to give a less biased estimator (sample) for the\n standard deviation.\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: variance([1..6])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead\n See https://trac.sagemath.org/29662 for details.\n 7/2\n sage: variance([1..6], bias=True)\n 35/12\n sage: variance([e, pi])\n 1/2*(pi - e)^2\n sage: variance([])\n NaN\n sage: variance([I, sqrt(2), 3/5])\n 1/450*(10*sqrt(2) - 5*I - 3)^2 + 1/450*(5*sqrt(2) - 10*I + 3)^2\n + 1/450*(5*sqrt(2) + 5*I - 6)^2\n sage: variance([RIF(1.0103, 1.0103), RIF(2)])\n 0.4897530450000000?\n sage: import numpy\n sage: x = numpy.array([1,2,3,4,5])\n sage: variance(x, bias=False)\n 2.5\n sage: x = stats.TimeSeries([1..100])\n sage: variance(x)\n 841.6666666666666\n sage: variance(x, bias=True)\n 833.25\n sage: class MyClass:\n ....: def variance(self, bias = False):\n ....: return 1\n sage: stats.variance(MyClass())\n 1\n sage: class SillyPythonList:\n ....: def __init__(self):\n ....: self.__list = [2, 4]\n ....: def __len__(self):\n ....: return len(self.__list)\n ....: def __iter__(self):\n ....: return self.__list.__iter__()\n ....: def mean(self):\n ....: return 3\n sage: R = SillyPythonList()\n sage: variance(R)\n 2\n sage: variance(R, bias=True)\n 1\n\n TESTS:\n\n The performance issue from :trac:`10019` is solved::\n\n sage: variance([1] * 2^18)\n 0\n '
deprecation(29662, 'sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead')
if hasattr(v, 'variance'):
return v.variance(bias=bias)
import numpy
x = 0
if isinstance(v, numpy.ndarray):
if bias:
return v.var()
else:
return v.var(ddof=1)
if (not v):
return NaN
mu = mean(v)
for vi in v:
x += ((vi - mu) ** 2)
if bias:
if isinstance(x, int):
return (x / ZZ(len(v)))
return (x / len(v))
else:
if isinstance(x, int):
return (x / ZZ((len(v) - 1)))
return (x / (len(v) - 1)) | def variance(v, bias=False):
'\n Return the variance of the elements of `v`.\n\n We define the variance of the empty list to be NaN,\n following the convention of MATLAB, Scipy, and R.\n\n This function is deprecated. Use ``numpy.var`` or ``numpy.nanvar``\n instead.\n\n INPUT:\n\n - `v` -- a list of numbers\n\n - ``bias`` -- bool (default: False); if False, divide by\n len(v) - 1 instead of len(v)\n to give a less biased estimator (sample) for the\n standard deviation.\n\n OUTPUT:\n\n - a number\n\n EXAMPLES::\n\n sage: variance([1..6])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead\n See https://trac.sagemath.org/29662 for details.\n 7/2\n sage: variance([1..6], bias=True)\n 35/12\n sage: variance([e, pi])\n 1/2*(pi - e)^2\n sage: variance([])\n NaN\n sage: variance([I, sqrt(2), 3/5])\n 1/450*(10*sqrt(2) - 5*I - 3)^2 + 1/450*(5*sqrt(2) - 10*I + 3)^2\n + 1/450*(5*sqrt(2) + 5*I - 6)^2\n sage: variance([RIF(1.0103, 1.0103), RIF(2)])\n 0.4897530450000000?\n sage: import numpy\n sage: x = numpy.array([1,2,3,4,5])\n sage: variance(x, bias=False)\n 2.5\n sage: x = stats.TimeSeries([1..100])\n sage: variance(x)\n 841.6666666666666\n sage: variance(x, bias=True)\n 833.25\n sage: class MyClass:\n ....: def variance(self, bias = False):\n ....: return 1\n sage: stats.variance(MyClass())\n 1\n sage: class SillyPythonList:\n ....: def __init__(self):\n ....: self.__list = [2, 4]\n ....: def __len__(self):\n ....: return len(self.__list)\n ....: def __iter__(self):\n ....: return self.__list.__iter__()\n ....: def mean(self):\n ....: return 3\n sage: R = SillyPythonList()\n sage: variance(R)\n 2\n sage: variance(R, bias=True)\n 1\n\n TESTS:\n\n The performance issue from :trac:`10019` is solved::\n\n sage: variance([1] * 2^18)\n 0\n '
deprecation(29662, 'sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead')
if hasattr(v, 'variance'):
return v.variance(bias=bias)
import numpy
x = 0
if isinstance(v, numpy.ndarray):
if bias:
return v.var()
else:
return v.var(ddof=1)
if (not v):
return NaN
mu = mean(v)
for vi in v:
x += ((vi - mu) ** 2)
if bias:
if isinstance(x, int):
return (x / ZZ(len(v)))
return (x / len(v))
else:
if isinstance(x, int):
return (x / ZZ((len(v) - 1)))
return (x / (len(v) - 1))<|docstring|>Return the variance of the elements of `v`.
We define the variance of the empty list to be NaN,
following the convention of MATLAB, Scipy, and R.
This function is deprecated. Use ``numpy.var`` or ``numpy.nanvar``
instead.
INPUT:
- `v` -- a list of numbers
- ``bias`` -- bool (default: False); if False, divide by
len(v) - 1 instead of len(v)
to give a less biased (sample) estimator for the
variance.
OUTPUT:
- a number
EXAMPLES::
sage: variance([1..6])
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.variance is deprecated; use numpy.var or numpy.nanvar instead
See https://trac.sagemath.org/29662 for details.
7/2
sage: variance([1..6], bias=True)
35/12
sage: variance([e, pi])
1/2*(pi - e)^2
sage: variance([])
NaN
sage: variance([I, sqrt(2), 3/5])
1/450*(10*sqrt(2) - 5*I - 3)^2 + 1/450*(5*sqrt(2) - 10*I + 3)^2
+ 1/450*(5*sqrt(2) + 5*I - 6)^2
sage: variance([RIF(1.0103, 1.0103), RIF(2)])
0.4897530450000000?
sage: import numpy
sage: x = numpy.array([1,2,3,4,5])
sage: variance(x, bias=False)
2.5
sage: x = stats.TimeSeries([1..100])
sage: variance(x)
841.6666666666666
sage: variance(x, bias=True)
833.25
sage: class MyClass:
....: def variance(self, bias = False):
....: return 1
sage: stats.variance(MyClass())
1
sage: class SillyPythonList:
....: def __init__(self):
....: self.__list = [2, 4]
....: def __len__(self):
....: return len(self.__list)
....: def __iter__(self):
....: return self.__list.__iter__()
....: def mean(self):
....: return 3
sage: R = SillyPythonList()
sage: variance(R)
2
sage: variance(R, bias=True)
1
TESTS:
The performance issue from :trac:`10019` is solved::
sage: variance([1] * 2^18)
0<|endoftext|> |
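The deprecation notice above points to numpy; a minimal sketch of the replacement (the sample data is illustrative):

import numpy as np

v = np.array([1, 2, 3, 4, 5, 6])
print(np.var(v, ddof=1))  # 3.5, the sample variance, matching variance([1..6]) == 7/2
print(np.var(v))          # 2.9166..., the population variance, matching variance([1..6], bias=True) == 35/12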
8aa4c24f24c0e81a3b54edacd91362c26d799523f1fb0f324918a4010bc04fb0 | def median(v):
"\n Return the median (middle value) of the elements of `v`\n\n If `v` is empty, we define the median to be NaN, which is\n consistent with NumPy (note that R returns NULL).\n If `v` is comprised of strings, TypeError occurs.\n For elements other than numbers, the median is a result of ``sorted()``.\n\n This function is deprecated. Use ``numpy.median`` or ``numpy.nanmedian``\n instead.\n\n INPUT:\n\n - `v` -- a list\n\n OUTPUT:\n\n - median element of `v`\n\n EXAMPLES::\n\n sage: median([1,2,3,4,5])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead\n See https://trac.sagemath.org/29662 for details.\n 3\n sage: median([e, pi])\n 1/2*pi + 1/2*e\n sage: median(['sage', 'linux', 'python'])\n 'python'\n sage: median([])\n NaN\n sage: class MyClass:\n ....: def median(self):\n ....: return 1\n sage: stats.median(MyClass())\n 1\n "
deprecation(29662, 'sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead')
if hasattr(v, 'median'):
return v.median()
if (not v):
return NaN
values = sorted(v)
if (len(values) % 2):
return values[(((len(values) + 1) // 2) - 1)]
else:
lower = values[(((len(values) + 1) // 2) - 1)]
upper = values[(len(values) // 2)]
return ((lower + upper) / ZZ(2)) | Return the median (middle value) of the elements of `v`
If `v` is empty, we define the median to be NaN, which is
consistent with NumPy (note that R returns NULL).
If `v` consists of strings and has even length, a TypeError occurs,
since two strings cannot be averaged.
For elements other than numbers, the median is the result of ``sorted()``.
This function is deprecated. Use ``numpy.median`` or ``numpy.nanmedian``
instead.
INPUT:
- `v` -- a list
OUTPUT:
- median element of `v`
EXAMPLES::
sage: median([1,2,3,4,5])
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead
See https://trac.sagemath.org/29662 for details.
3
sage: median([e, pi])
1/2*pi + 1/2*e
sage: median(['sage', 'linux', 'python'])
'python'
sage: median([])
NaN
sage: class MyClass:
....: def median(self):
....: return 1
sage: stats.median(MyClass())
1 | src/sage/stats/basic_stats.py | median | LaisRast/sage | 1,742 | python | def median(v):
"\n Return the median (middle value) of the elements of `v`\n\n If `v` is empty, we define the median to be NaN, which is\n consistent with NumPy (note that R returns NULL).\n If `v` is comprised of strings, TypeError occurs.\n For elements other than numbers, the median is a result of ``sorted()``.\n\n This function is deprecated. Use ``numpy.median`` or ``numpy.nanmedian``\n instead.\n\n INPUT:\n\n - `v` -- a list\n\n OUTPUT:\n\n - median element of `v`\n\n EXAMPLES::\n\n sage: median([1,2,3,4,5])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead\n See https://trac.sagemath.org/29662 for details.\n 3\n sage: median([e, pi])\n 1/2*pi + 1/2*e\n sage: median(['sage', 'linux', 'python'])\n 'python'\n sage: median([])\n NaN\n sage: class MyClass:\n ....: def median(self):\n ....: return 1\n sage: stats.median(MyClass())\n 1\n "
deprecation(29662, 'sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead')
if hasattr(v, 'median'):
return v.median()
if (not v):
return NaN
values = sorted(v)
if (len(values) % 2):
return values[(((len(values) + 1) // 2) - 1)]
else:
lower = values[(((len(values) + 1) // 2) - 1)]
upper = values[(len(values) // 2)]
return ((lower + upper) / ZZ(2)) | def median(v):
"\n Return the median (middle value) of the elements of `v`\n\n If `v` is empty, we define the median to be NaN, which is\n consistent with NumPy (note that R returns NULL).\n If `v` is comprised of strings, TypeError occurs.\n For elements other than numbers, the median is a result of ``sorted()``.\n\n This function is deprecated. Use ``numpy.median`` or ``numpy.nanmedian``\n instead.\n\n INPUT:\n\n - `v` -- a list\n\n OUTPUT:\n\n - median element of `v`\n\n EXAMPLES::\n\n sage: median([1,2,3,4,5])\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead\n See https://trac.sagemath.org/29662 for details.\n 3\n sage: median([e, pi])\n 1/2*pi + 1/2*e\n sage: median(['sage', 'linux', 'python'])\n 'python'\n sage: median([])\n NaN\n sage: class MyClass:\n ....: def median(self):\n ....: return 1\n sage: stats.median(MyClass())\n 1\n "
deprecation(29662, 'sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead')
if hasattr(v, 'median'):
return v.median()
if (not v):
return NaN
values = sorted(v)
if (len(values) % 2):
return values[(((len(values) + 1) // 2) - 1)]
else:
lower = values[(((len(values) + 1) // 2) - 1)]
upper = values[(len(values) // 2)]
return ((lower + upper) / ZZ(2))<|docstring|>Return the median (middle value) of the elements of `v`
If `v` is empty, we define the median to be NaN, which is
consistent with NumPy (note that R returns NULL).
If `v` consists of strings and has even length, a TypeError occurs,
since two strings cannot be averaged.
For elements other than numbers, the median is the result of ``sorted()``.
This function is deprecated. Use ``numpy.median`` or ``numpy.nanmedian``
instead.
INPUT:
- `v` -- a list
OUTPUT:
- median element of `v`
EXAMPLES::
sage: median([1,2,3,4,5])
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.median is deprecated; use numpy.median or numpy.nanmedian instead
See https://trac.sagemath.org/29662 for details.
3
sage: median([e, pi])
1/2*pi + 1/2*e
sage: median(['sage', 'linux', 'python'])
'python'
sage: median([])
NaN
sage: class MyClass:
....: def median(self):
....: return 1
sage: stats.median(MyClass())
1<|endoftext|> |
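Following the deprecation notice above, a minimal numpy sketch (illustrative data; numpy.median([]) likewise yields NaN, with a RuntimeWarning):

import numpy as np

print(np.median([1, 2, 3, 4, 5]))  # 3.0
print(np.median([2, 4]))           # 3.0, the mean of the two middle values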
7adca9d4b49527f25bcdd3df4fa359aa2a0906e12335020699556dd98f3e0191 | def moving_average(v, n):
'\n Return the moving average of a list `v`.\n\n The moving average of a list is often used to smooth out noisy data.\n\n If `v` is empty, we define the entries of the moving average to be NaN.\n\n This method is deprecated. Use ``pandas.Series.rolling`` instead.\n\n INPUT:\n\n - `v` -- a list\n\n - `n` -- the number of values used in computing each average.\n\n OUTPUT:\n\n - a list of length ``len(v)-n+1``, since we do not fabric any values\n\n EXAMPLES::\n\n sage: moving_average([1..10], 1)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead\n See https://trac.sagemath.org/29662 for details.\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n sage: moving_average([1..10], 4)\n [5/2, 7/2, 9/2, 11/2, 13/2, 15/2, 17/2]\n sage: moving_average([], 1)\n []\n sage: moving_average([pi, e, I, sqrt(2), 3/5], 2)\n [1/2*pi + 1/2*e, 1/2*e + 1/2*I, 1/2*sqrt(2) + 1/2*I,\n 1/2*sqrt(2) + 3/10]\n\n We check if the input is a time series, and if so use the\n optimized ``simple_moving_average`` method, but with (slightly\n different) meaning as defined above (the point is that the\n ``simple_moving_average`` on time series returns `n` values::\n\n sage: a = stats.TimeSeries([1..10])\n sage: stats.moving_average(a, 3)\n [2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]\n sage: stats.moving_average(list(a), 3)\n [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]\n\n '
deprecation(29662, 'sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead')
if (not v):
return v
from .time_series import TimeSeries
if isinstance(v, TimeSeries):
return v.simple_moving_average(n)[(n - 1):]
n = int(n)
if (n <= 0):
raise ValueError('n must be positive')
nn = ZZ(n)
s = sum(v[:n])
ans = [(s / nn)]
for i in range(n, len(v)):
s += (v[i] - v[(i - n)])
ans.append((s / nn))
return ans | Return the moving average of a list `v`.
The moving average of a list is often used to smooth out noisy data.
If `v` is empty, the empty list is returned (see the example below).
This method is deprecated. Use ``pandas.Series.rolling`` instead.
INPUT:
- `v` -- a list
- `n` -- the number of values used in computing each average.
OUTPUT:
- a list of length ``len(v)-n+1``, since we do not fabricate any values
EXAMPLES::
sage: moving_average([1..10], 1)
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead
See https://trac.sagemath.org/29662 for details.
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sage: moving_average([1..10], 4)
[5/2, 7/2, 9/2, 11/2, 13/2, 15/2, 17/2]
sage: moving_average([], 1)
[]
sage: moving_average([pi, e, I, sqrt(2), 3/5], 2)
[1/2*pi + 1/2*e, 1/2*e + 1/2*I, 1/2*sqrt(2) + 1/2*I,
1/2*sqrt(2) + 3/10]
We check if the input is a time series, and if so use the
optimized ``simple_moving_average`` method, but with (slightly
different) meaning as defined above (the point is that the
``simple_moving_average`` on time series returns ``len(v)`` values, so we drop the first `n-1` here)::
sage: a = stats.TimeSeries([1..10])
sage: stats.moving_average(a, 3)
[2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]
sage: stats.moving_average(list(a), 3)
[2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] | src/sage/stats/basic_stats.py | moving_average | LaisRast/sage | 1,742 | python | def moving_average(v, n):
'\n Return the moving average of a list `v`.\n\n The moving average of a list is often used to smooth out noisy data.\n\n If `v` is empty, we define the entries of the moving average to be NaN.\n\n This method is deprecated. Use ``pandas.Series.rolling`` instead.\n\n INPUT:\n\n - `v` -- a list\n\n - `n` -- the number of values used in computing each average.\n\n OUTPUT:\n\n - a list of length ``len(v)-n+1``, since we do not fabric any values\n\n EXAMPLES::\n\n sage: moving_average([1..10], 1)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead\n See https://trac.sagemath.org/29662 for details.\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n sage: moving_average([1..10], 4)\n [5/2, 7/2, 9/2, 11/2, 13/2, 15/2, 17/2]\n sage: moving_average([], 1)\n []\n sage: moving_average([pi, e, I, sqrt(2), 3/5], 2)\n [1/2*pi + 1/2*e, 1/2*e + 1/2*I, 1/2*sqrt(2) + 1/2*I,\n 1/2*sqrt(2) + 3/10]\n\n We check if the input is a time series, and if so use the\n optimized ``simple_moving_average`` method, but with (slightly\n different) meaning as defined above (the point is that the\n ``simple_moving_average`` on time series returns `n` values::\n\n sage: a = stats.TimeSeries([1..10])\n sage: stats.moving_average(a, 3)\n [2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]\n sage: stats.moving_average(list(a), 3)\n [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]\n\n '
deprecation(29662, 'sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead')
if (not v):
return v
from .time_series import TimeSeries
if isinstance(v, TimeSeries):
return v.simple_moving_average(n)[(n - 1):]
n = int(n)
if (n <= 0):
raise ValueError('n must be positive')
nn = ZZ(n)
s = sum(v[:n])
ans = [(s / nn)]
for i in range(n, len(v)):
s += (v[i] - v[(i - n)])
ans.append((s / nn))
return ans | def moving_average(v, n):
'\n Return the moving average of a list `v`.\n\n The moving average of a list is often used to smooth out noisy data.\n\n If `v` is empty, we define the entries of the moving average to be NaN.\n\n This method is deprecated. Use ``pandas.Series.rolling`` instead.\n\n INPUT:\n\n - `v` -- a list\n\n - `n` -- the number of values used in computing each average.\n\n OUTPUT:\n\n - a list of length ``len(v)-n+1``, since we do not fabric any values\n\n EXAMPLES::\n\n sage: moving_average([1..10], 1)\n doctest:warning...\n DeprecationWarning: sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead\n See https://trac.sagemath.org/29662 for details.\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n sage: moving_average([1..10], 4)\n [5/2, 7/2, 9/2, 11/2, 13/2, 15/2, 17/2]\n sage: moving_average([], 1)\n []\n sage: moving_average([pi, e, I, sqrt(2), 3/5], 2)\n [1/2*pi + 1/2*e, 1/2*e + 1/2*I, 1/2*sqrt(2) + 1/2*I,\n 1/2*sqrt(2) + 3/10]\n\n We check if the input is a time series, and if so use the\n optimized ``simple_moving_average`` method, but with (slightly\n different) meaning as defined above (the point is that the\n ``simple_moving_average`` on time series returns `n` values::\n\n sage: a = stats.TimeSeries([1..10])\n sage: stats.moving_average(a, 3)\n [2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]\n sage: stats.moving_average(list(a), 3)\n [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]\n\n '
deprecation(29662, 'sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead')
if (not v):
return v
from .time_series import TimeSeries
if isinstance(v, TimeSeries):
return v.simple_moving_average(n)[(n - 1):]
n = int(n)
if (n <= 0):
raise ValueError('n must be positive')
nn = ZZ(n)
s = sum(v[:n])
ans = [(s / nn)]
for i in range(n, len(v)):
s += (v[i] - v[(i - n)])
ans.append((s / nn))
return ans<|docstring|>Return the moving average of a list `v`.
The moving average of a list is often used to smooth out noisy data.
If `v` is empty, the empty list is returned (see the example below).
This method is deprecated. Use ``pandas.Series.rolling`` instead.
INPUT:
- `v` -- a list
- `n` -- the number of values used in computing each average.
OUTPUT:
- a list of length ``len(v)-n+1``, since we do not fabricate any values
EXAMPLES::
sage: moving_average([1..10], 1)
doctest:warning...
DeprecationWarning: sage.stats.basic_stats.moving_average is deprecated; use pandas.Series.rolling instead
See https://trac.sagemath.org/29662 for details.
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sage: moving_average([1..10], 4)
[5/2, 7/2, 9/2, 11/2, 13/2, 15/2, 17/2]
sage: moving_average([], 1)
[]
sage: moving_average([pi, e, I, sqrt(2), 3/5], 2)
[1/2*pi + 1/2*e, 1/2*e + 1/2*I, 1/2*sqrt(2) + 1/2*I,
1/2*sqrt(2) + 3/10]
We check if the input is a time series, and if so use the
optimized ``simple_moving_average`` method, but with (slightly
different) meaning as defined above (the point is that the
``simple_moving_average`` on time series returns ``len(v)`` values, so we drop the first `n-1` here)::
sage: a = stats.TimeSeries([1..10])
sage: stats.moving_average(a, 3)
[2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]
sage: stats.moving_average(list(a), 3)
[2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]<|endoftext|> |
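A minimal sketch of the recommended pandas replacement (illustrative data; dropna() removes the first n-1 windows, so the length is len(v)-n+1 as above):

import pandas as pd

s = pd.Series(range(1, 11))
print(s.rolling(4).mean().dropna().tolist())
# [2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5], i.e. [5/2, 7/2, ..., 17/2]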
9f86e2304d0fb1f91c18c9ca54fba1e2bc607fe8fd2ea9a68d9cab22afb063f5 | def Whoami(self):
'Rucio Summoner:Whoami\n Returns a dictionary identifying the current\n Rucio user and credentials.\n\n :return: A dictionary with Rucio whoami information\n '
return self._rucio.Whoami() | Rucio Summoner:Whoami
Returns a dictionary identifying the current
Rucio user and credentials.
:return: A dictionary with Rucio whoami information | admix/interfaces/rucio_summoner.py | Whoami | XENONnT/admix | 2 | python | def Whoami(self):
'Rucio Summoner:Whoami\n Returns a dictionary identifying the current\n Rucio user and credentials.\n\n :return: A dictionary with Rucio whoami information\n '
return self._rucio.Whoami() | def Whoami(self):
'Rucio Summoner:Whoami\n Returns a dictionary identifying the current\n Rucio user and credentials.\n\n :return: A dictionary with Rucio whoami information\n '
return self._rucio.Whoami()<|docstring|>Rucio Summoner:Whoami
Returns a dictionary identifying the current
Rucio user and credentials.
:return: A dictionary with Rucio whoami information<|endoftext|>
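A minimal usage sketch; the class name RucioSummoner and its no-argument constructor are assumptions inferred from the file path above, not verified against the admix package:

from admix.interfaces.rucio_summoner import RucioSummoner  # assumed import path

rs = RucioSummoner()  # constructor arguments are an assumption
print(rs.Whoami())    # dictionary describing the current Rucio account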
b25f74bd094f17c90864f75f3425963ca44753ba0042471faac1cab39f9af6b0 | def Alive(self):
'Function: Alive\n Simple print statement to test Rucio setup\n '
whoami = self._rucio.Whoami()
print('Rucio ')
print('Rucio Whoami()')
for (ikey, ival) in whoami.items():
print(ikey, '\t \t', ival)
print()
print('Rucio alive') | Function: Alive
Simple print statement to test Rucio setup | admix/interfaces/rucio_summoner.py | Alive | XENONnT/admix | 2 | python | def Alive(self):
'Function: Alive\n Simple print statement to test Rucio setup\n '
whoami = self._rucio.Whoami()
print('Rucio ')
print('Rucio Whoami()')
for (ikey, ival) in whoami.items():
print(ikey, '\t \t', ival)
print()
print('Rucio alive') | def Alive(self):
'Function: Alive\n Simple print statement to test Rucio setup\n '
whoami = self._rucio.Whoami()
print('Rucio ')
print('Rucio Whoami()')
for (ikey, ival) in whoami.items():
print(ikey, '\t \t', ival)
print()
print('Rucio alive')<|docstring|>Function: Alive
Simple print statement to test Rucio setup<|endoftext|> |
fa81e23684654c539970da9e30d22ad1e4781e20532c3a58192fe4b7cc68175b | def _md5_hash(self, string):
'Function: _md5_hash(...)\n\n Calculate an MD5 hash from a string\n\n :param string: A string\n :return result: An MD5 checksum of the input string\n '
return hashlib.md5(string.encode('utf-8')).hexdigest() | Function: _md5_hash(...)
Calculate an MD5 hash from a string
:param string: A string
:return result: An MD5 checksum of the input string | admix/interfaces/rucio_summoner.py | _md5_hash | XENONnT/admix | 2 | python | def _md5_hash(self, string):
'Function: _md5_hash(...)\n\n Calculate an MD5 hash from a string\n\n :param string: A string\n :return result: An MD5 checksum of the input string\n '
return hashlib.md5(string.encode('utf-8')).hexdigest() | def _md5_hash(self, string):
'Function: _md5_hash(...)\n\n Calculate an MD5 hash from a string\n\n :param string: A string\n :return result: An MD5 checksum of the input string\n '
return hashlib.md5(string.encode('utf-8')).hexdigest()<|docstring|>Function: _md5_hash(...)
Calculate an MD5 hash from a string
:param string: A string
:return result: An MD5 checksum of the input string<|endoftext|>
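This helper is deterministic, which is what later lets ListFileReplicas rebuild physical paths without contacting a server. A standalone sketch of the same hashing (the DID is purely illustrative):

import hashlib

did = 'user.jdoe:chunk_000.npz'  # hypothetical DID
h = hashlib.md5(did.encode('utf-8')).hexdigest()
print(h[0:2], h[2:4])            # the two directory components used on disk RSEs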
e0a05fb1d1ab8629f5ad86b37e983ac87ae9318b3f5f0ab6220b4638f38b0ae1 | def _VerifyStructure(self, upload_structure=None, level=(- 1)):
'The Rucio summoner is able to deal with\n two kinds of valid input arguments. To avoid\n a break in the command chain we verify the\n structure here first and prepare further steps.\n The two valid input arguments are:\n - A Rucio scope:name structure (DID) which is encoded\n by a string\n - A stacked container-dataset-file structure which\n is encoded in a dictionary\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return: (val_scope, val_dname): The extracted Rucio DID to which certain operations are applied.\n '
val_scope = None
val_dname = None
if isinstance(upload_structure, str):
try:
val_scope = upload_structure.split(':')[0]
val_dname = upload_structure.split(':')[1]
except IndexError as e:
print('Function _VerifyStructure for Rucio DID input: IndexError')
print('Message:', e)
elif isinstance(upload_structure, dict):
sorted_keys = [key for key in sorted(upload_structure.keys())]
try:
val_scope = upload_structure[sorted_keys[level]]['did'].split(':')[0]
val_dname = upload_structure[sorted_keys[level]]['did'].split(':')[1]
except IndexError as e:
print('Function _VerifyStructure for Rucio template input: IndexError')
print('Message:', e)
return (val_scope, val_dname) | The Rucio summoner is able to deal with
two kinds of valid input arguments. To avoid
a break in the command chain we verify the
structure here first and prepare further steps.
The two valid input arguments are:
- A Rucio scope:name structure (DID) which is encoded
by a string
- A stacked container-dataset-file structure which
is encoded in a dictionary
:param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return: (val_scope, val_dname): The extracted Rucio DID to which certain operations are applied. | admix/interfaces/rucio_summoner.py | _VerifyStructure | XENONnT/admix | 2 | python | def _VerifyStructure(self, upload_structure=None, level=(- 1)):
'The Rucio summoner is able to deal with\n two kinds of valid input arguments. To avoid\n a break in the command chain we verify the\n structure here first and prepare further steps.\n The two valid input arguments are:\n - A Rucio scope:name structure (DID) which is encoded\n by a string\n - A stacked container-dataset-file structure which\n is encoded in a dictionary\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return: (val_scope, val_dname): The extracted Rucio DID to which certain operations are applied.\n '
val_scope = None
val_dname = None
if isinstance(upload_structure, str):
try:
val_scope = upload_structure.split(':')[0]
val_dname = upload_structure.split(':')[1]
except IndexError as e:
print('Function _VerifyStructure for Rucio DID input: IndexError')
print('Message:', e)
elif isinstance(upload_structure, dict):
sorted_keys = [key for key in sorted(upload_structure.keys())]
try:
val_scope = upload_structure[sorted_keys[level]]['did'].split(':')[0]
val_dname = upload_structure[sorted_keys[level]]['did'].split(':')[1]
except IndexError as e:
print('Function _VerifyStructure for Rucio template input: IndexError')
print('Message:', e)
return (val_scope, val_dname) | def _VerifyStructure(self, upload_structure=None, level=(- 1)):
'The Rucio summoner is able to deal with\n two kinds of valid input arguments. To avoid\n a break in the command chain we verify the\n structure here first and prepare further steps.\n The two valid input arguments are:\n - A Rucio scope:name structure (DID) which is encoded\n by a string\n - A stacked container-dataset-file structure which\n is encoded in a dictionary\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return: (val_scope, val_dname): The extracted Rucio DID to which certain operations are applied.\n '
val_scope = None
val_dname = None
if isinstance(upload_structure, str):
try:
val_scope = upload_structure.split(':')[0]
val_dname = upload_structure.split(':')[1]
except IndexError as e:
print('Function _VerifyStructure for Rucio DID input: IndexError')
print('Message:', e)
elif isinstance(upload_structure, dict):
sorted_keys = [key for key in sorted(upload_structure.keys())]
try:
val_scope = upload_structure[sorted_keys[level]]['did'].split(':')[0]
val_dname = upload_structure[sorted_keys[level]]['did'].split(':')[1]
except IndexError as e:
print('Function _VerifyStructure for Rucio template input: IndexError')
print('Message:', e)
return (val_scope, val_dname)<|docstring|>The Rucio summoner is able to deal with
two kinds of valid input arguments. To avoid
a break in the command chain we verify the
structure here first and prepare further steps.
The two valid input arguments are:
- A Rucio scope:name structure (DID) which is encoded
by a string
- A stacked container-dataset-file structure which
is encoded in a dictionary
:param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return: (val_scope, val_dname): The extracted Rucio DID to which certain operations are applied.<|endoftext|> |
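A sketch of the two accepted input forms described above (DIDs and template contents are illustrative; the RucioSummoner class name and constructor are assumptions, as in the Whoami sketch):

from admix.interfaces.rucio_summoner import RucioSummoner  # assumed import path
rs = RucioSummoner()  # constructor arguments are an assumption

# Form 1: a plain Rucio DID string
scope, name = rs._VerifyStructure('user.jdoe:dataset_001')  # hypothetical DID

# Form 2: a stacked template dictionary; level=-1 picks the last sorted key
template = {
    'a_container': {'did': 'user.jdoe:container_001', 'type': 'container', 'tag_words': []},
    'b_dataset': {'did': 'user.jdoe:dataset_001', 'type': 'dataset', 'tag_words': []},
}
scope, name = rs._VerifyStructure(template, level=-1)  # ('user.jdoe', 'dataset_001')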
923a49dca8d30fbcaa7026b06d0512ca43d5ded30c57e03d400faabc6a855881 | def _IsTemplate(self, upload_structure):
'Function: _IsTemplate()\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :return is_template: Returns True if the input is a template dictionary, otherwise False\n '
is_template = False
val_scope = None
val_dname = None
if isinstance(upload_structure, dict):
sorted_keys = [key for key in sorted(upload_structure.keys())]
level_checks = []
for i_level in sorted_keys:
i_level = upload_structure[i_level]
level_check = False
if (isinstance(i_level, dict) and ('did' in list(i_level.keys())) and ('type' in list(i_level.keys())) and ('tag_words' in list(i_level.keys()))):
level_check = True
level_checks.append(level_check)
if (False not in level_checks):
is_template = True
return is_template | Function: _IsTemplate()
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:return is_template: Returns True if the input is a template dictionary, otherwise False | admix/interfaces/rucio_summoner.py | _IsTemplate | XENONnT/admix | 2 | python | def _IsTemplate(self, upload_structure):
'Function: _IsTemplate()\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :return is_template: Returns True if the input is a template dictionary, otherwise False\n '
is_template = False
val_scope = None
val_dname = None
if isinstance(upload_structure, dict):
sorted_keys = [key for key in sorted(upload_structure.keys())]
level_checks = []
for i_level in sorted_keys:
i_level = upload_structure[i_level]
level_check = False
if (isinstance(i_level, dict) and ('did' in list(i_level.keys())) and ('type' in list(i_level.keys())) and ('tag_words' in list(i_level.keys()))):
level_check = True
level_checks.append(level_check)
if (False not in level_checks):
is_template = True
return is_template | def _IsTemplate(self, upload_structure):
'Function: _IsTemplate()\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :return is_template: Returns True if the input is a template dictionary, otherwise False\n '
is_template = False
val_scope = None
val_dname = None
if isinstance(upload_structure, dict):
sorted_keys = [key for key in sorted(upload_structure.keys())]
level_checks = []
for i_level in sorted_keys:
i_level = upload_structure[i_level]
level_check = False
if (isinstance(i_level, dict) and ('did' in list(i_level.keys())) and ('type' in list(i_level.keys())) and ('tag_words' in list(i_level.keys()))):
level_check = True
level_checks.append(level_check)
if (False not in level_checks):
is_template = True
return is_template<|docstring|>Function: _IsTemplate()
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:return is_template: Returns True if the input is a template dictionary, otherwise False<|endoftext|>
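An illustrative sketch of what passes the checks above: every level must be a dict carrying 'did', 'type' and 'tag_words' keys (names below are hypothetical; rs as in the Whoami sketch):

from admix.interfaces.rucio_summoner import RucioSummoner  # assumed import path
rs = RucioSummoner()  # constructor arguments are an assumption

good = {'l0': {'did': 'user.jdoe:c1', 'type': 'container', 'tag_words': []}}
bad = {'l0': {'did': 'user.jdoe:c1'}}  # missing 'type' and 'tag_words'
print(rs._IsTemplate(good))            # True
print(rs._IsTemplate(bad))             # False
print(rs._IsTemplate('user.jdoe:c1'))  # False, a DID string is never a template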
775d944da536e279ba4b7c871113c0e7aeb9624de0f59ed1ae4c183c64c6b378 | def AddRule(self, did, rse, lifetime=None, protocol='rucio-catalogue', priority=3):
'Add rules for a Rucio DID or dictionary template.\n\n :param: did: Rucio DID form of "scope:name"\n :param: rse: An existing Rucio storage element (RSE)\n :param: lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: protocol: Should always be \'rucio-catalogue\'?\n :return:\n '
(val_scope, val_dname) = self._VerifyStructure(did)
rules = self._rucio.ListDidRules(val_scope, val_dname)
current_rses = [r['rse_expression'] for r in rules]
if (rse in current_rses):
print(('There already exists a rule for DID %s at RSE %s' % (did, rse)))
return 1
did_dict = {}
did_dict['scope'] = val_scope
did_dict['name'] = val_dname
self._rucio.AddRule([did_dict], copies=1, rse_expression=rse, lifetime=lifetime, priority=priority)
return 0 | Add rules for a Rucio DID or dictionary template.
:param: did: Rucio DID form of "scope:name"
:param: rse: An existing Rucio storage element (RSE)
:param: lifetime: Choose a lifetime of the transfer rule in seconds or None
:param: protocol: Should always be 'rucio-catalogue'?
:return: 0 on success, 1 if a rule for the DID already exists at the RSE | admix/interfaces/rucio_summoner.py | AddRule | XENONnT/admix | 2 | python | def AddRule(self, did, rse, lifetime=None, protocol='rucio-catalogue', priority=3):
'Add rules for a Rucio DID or dictionary template.\n\n :param: did: Rucio DID form of "scope:name"\n :param: rse: An existing Rucio storage element (RSE)\n :param: lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: protocol: Should always be \'rucio-catalogue\'?\n :return:\n '
(val_scope, val_dname) = self._VerifyStructure(did)
rules = self._rucio.ListDidRules(val_scope, val_dname)
current_rses = [r['rse_expression'] for r in rules]
if (rse in current_rses):
print(('There already exists a rule for DID %s at RSE %s' % (did, rse)))
return 1
did_dict = {}
did_dict['scope'] = val_scope
did_dict['name'] = val_dname
self._rucio.AddRule([did_dict], copies=1, rse_expression=rse, lifetime=lifetime, priority=priority)
return 0 | def AddRule(self, did, rse, lifetime=None, protocol='rucio-catalogue', priority=3):
'Add rules for a Rucio DID or dictionary template.\n\n :param: did: Rucio DID form of "scope:name"\n :param: rse: An existing Rucio storage element (RSE)\n :param: lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: protocol: Should always be \'rucio-catalogue\'?\n :return:\n '
(val_scope, val_dname) = self._VerifyStructure(did)
rules = self._rucio.ListDidRules(val_scope, val_dname)
current_rses = [r['rse_expression'] for r in rules]
if (rse in current_rses):
print(('There already exists a rule for DID %s at RSE %s' % (did, rse)))
return 1
did_dict = {}
did_dict['scope'] = val_scope
did_dict['name'] = val_dname
self._rucio.AddRule([did_dict], copies=1, rse_expression=rse, lifetime=lifetime, priority=priority)
return 0<|docstring|>Add rules for a Rucio DID or dictionary template.
:param: did: Rucio DID form of "scope:name"
:param: rse: An existing Rucio storage element (RSE)
:param: lifetime: Choose a lifetime of the transfer rule in seconds or None
:param: protocol: Should always be 'rucio-catalogue'?
:return: 0 on success, 1 if a rule for the DID already exists at the RSE<|endoftext|>
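A usage sketch with hypothetical DID and RSE names (rs as in the Whoami sketch above):

from admix.interfaces.rucio_summoner import RucioSummoner  # assumed import path
rs = RucioSummoner()  # constructor arguments are an assumption

status = rs.AddRule('user.jdoe:dataset_001', rse='UC_OSG_USERDISK', lifetime=7 * 24 * 3600)
if status == 1:
    print('a rule for this DID already exists at that RSE')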
611fed30647511dbe62e97f1cab96e7a52059b934a9f3cf386964811cde8bb90 | def AddConditionalRule(self, did, from_rse, to_rse, lifetime=None, protocol='rucio-catalogue', priority=3):
'Add rules for a Rucio DID or dictionary template.\n\n :param: did: Rucio DID form of "scope:name"\n :param: rse: An existing Rucio storage element (RSE)\n :param: lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: protocol: Should always be \'rucio-catalogue\'?\n :return:\n '
(val_scope, val_dname) = self._VerifyStructure(did)
rules = self._rucio.ListDidRules(val_scope, val_dname)
current_rses = [r['rse_expression'] for r in rules]
if (to_rse in current_rses):
print(('There already exists a rule for DID %s at RSE %s' % (did, to_rse)))
return 1
did_dict = {}
did_dict['scope'] = val_scope
did_dict['name'] = val_dname
self._rucio.AddRule([did_dict], copies=1, rse_expression=to_rse, source_replica_expression=from_rse, lifetime=lifetime, priority=priority)
return 0 | Add rules for a Rucio DID or dictionary template.
:param: did: Rucio DID form of "scope:name"
:param: from_rse: The source RSE; used as the rule's source_replica_expression
:param: to_rse: The destination RSE for the new rule
:param: lifetime: Choose a lifetime of the transfer rule in seconds or None
:param: protocol: Should always be 'rucio-catalogue'?
:return: 0 on success, 1 if a rule for the DID already exists at the destination RSE | admix/interfaces/rucio_summoner.py | AddConditionalRule | XENONnT/admix | 2 | python | def AddConditionalRule(self, did, from_rse, to_rse, lifetime=None, protocol='rucio-catalogue', priority=3):
'Add rules for a Rucio DID or dictionary template.\n\n :param: did: Rucio DID form of "scope:name"\n :param: rse: An existing Rucio storage element (RSE)\n :param: lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: protocol: Should always be \'rucio-catalogue\'?\n :return:\n '
(val_scope, val_dname) = self._VerifyStructure(did)
rules = self._rucio.ListDidRules(val_scope, val_dname)
current_rses = [r['rse_expression'] for r in rules]
if (to_rse in current_rses):
print(('There already exists a rule for DID %s at RSE %s' % (did, to_rse)))
return 1
did_dict = {}
did_dict['scope'] = val_scope
did_dict['name'] = val_dname
self._rucio.AddRule([did_dict], copies=1, rse_expression=to_rse, source_replica_expression=from_rse, lifetime=lifetime, priority=priority)
return 0 | def AddConditionalRule(self, did, from_rse, to_rse, lifetime=None, protocol='rucio-catalogue', priority=3):
'Add rules for a Rucio DID or dictionary template.\n\n :param: did: Rucio DID form of "scope:name"\n :param: rse: An existing Rucio storage element (RSE)\n :param: lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: protocol: Should always be \'rucio-catalogue\'?\n :return:\n '
(val_scope, val_dname) = self._VerifyStructure(did)
rules = self._rucio.ListDidRules(val_scope, val_dname)
current_rses = [r['rse_expression'] for r in rules]
if (to_rse in current_rses):
print(('There already exists a rule for DID %s at RSE %s' % (did, to_rse)))
return 1
did_dict = {}
did_dict['scope'] = val_scope
did_dict['name'] = val_dname
self._rucio.AddRule([did_dict], copies=1, rse_expression=to_rse, source_replica_expression=from_rse, lifetime=lifetime, priority=priority)
return 0<|docstring|>Add rules for a Rucio DID or dictionary template.
:param: did: Rucio DID form of "scope:name"
:param: from_rse: The source RSE; used as the rule's source_replica_expression
:param: to_rse: The destination RSE for the new rule
:param: lifetime: Choose a lifetime of the transfer rule in seconds or None
:param: protocol: Should always be 'rucio-catalogue'?
:return: 0 on success, 1 if a rule for the DID already exists at the destination RSE<|endoftext|>
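A sketch of a source-pinned replication (hypothetical RSE names; rs as in the Whoami sketch above):

from admix.interfaces.rucio_summoner import RucioSummoner  # assumed import path
rs = RucioSummoner()  # constructor arguments are an assumption

# Copy to to_rse while pinning the transfer source to from_rse
status = rs.AddConditionalRule('user.jdoe:dataset_001',
                               from_rse='LNGS_USERDISK',
                               to_rse='UC_OSG_USERDISK',
                               lifetime=None)  # the rule does not expire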
19d3027c890a258d15b5823dbdae3f07172f1a9ed7902224e267be87d8b185a6 | def UpdateRules(self, upload_structure=None, rse_rules=None, level=(- 1)):
'Update existing rules for a Rucio DID or dictionary template.\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: rse_rules: A list of strings which follow a certain template of ["{protocol}:{rse}:{lifetime}",...]\n With:\n protocol: rucio-catalogue\n rse: An existing Rucio storage element (RSE)\n lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: A dictionary with Rucio Storage Elements (RSE) as keys. The value is another dictionary\n with keys \'result\' (0 on success, 1 on failure) and lifetime ( an integer > 0)\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
r_rse_ids = {}
for i_rule in r_rules:
r_rse = i_rule['rse_expression']
r_rse_ids[r_rse] = i_rule['id']
result = {}
for i_rule in rse_rules:
g_ptr = i_rule.split(':')[0]
g_rse = i_rule.split(':')[1]
g_rlt = i_rule.split(':')[2]
if (g_rlt == 'None'):
g_rlt = None
else:
g_rlt = int(g_rlt)
if (g_rse not in list(r_rse_ids.keys())):
continue
options = {}
options['lifetime'] = g_rlt
r = self._rucio.UpdateRule(r_rse_ids[g_rse], options)
result[g_rse] = {}
result[g_rse]['result'] = r
result[g_rse]['lifetime'] = g_rlt
return result | Update existing rules for a Rucio DID or dictionary template.
:param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param: rse_rules: A list of strings which follow a certain template of ["{protocol}:{rse}:{lifetime}",...]
With:
protocol: rucio-catalogue
rse: An existing Rucio storage element (RSE)
lifetime: Choose a lifetime of the transfer rule in seconds or None
:param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return result: A dictionary with Rucio Storage Elements (RSE) as keys. The value is another dictionary
with keys 'result' (0 on success, 1 on failure) and 'lifetime' (an integer > 0, or None) | admix/interfaces/rucio_summoner.py | UpdateRules | XENONnT/admix | 2 | python | def UpdateRules(self, upload_structure=None, rse_rules=None, level=(- 1)):
'Update existing rules for a Rucio DID or dictionary template.\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: rse_rules: A list of strings which follow a certain template of ["{protocol}:{rse}:{lifetime}",...]\n With:\n protocol: rucio-catalogue\n rse: An existing Rucio storage element (RSE)\n lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: A dictionary with Rucio Storage Elements (RSE) as keys. The value is another dictionary\n with keys \'result\' (0 on success, 1 on failure) and lifetime ( an integer > 0)\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
r_rse_ids = {}
for i_rule in r_rules:
r_rse = i_rule['rse_expression']
r_rse_ids[r_rse] = i_rule['id']
result = {}
for i_rule in rse_rules:
g_ptr = i_rule.split(':')[0]
g_rse = i_rule.split(':')[1]
g_rlt = i_rule.split(':')[2]
if (g_rlt == 'None'):
g_rlt = None
else:
g_rlt = int(g_rlt)
if (g_rse not in list(r_rse_ids.keys())):
continue
options = {}
options['lifetime'] = g_rlt
r = self._rucio.UpdateRule(r_rse_ids[g_rse], options)
result[g_rse] = {}
result[g_rse]['result'] = r
result[g_rse]['lifetime'] = g_rlt
return result | def UpdateRules(self, upload_structure=None, rse_rules=None, level=(- 1)):
'Update existing rules for a Rucio DID or dictionary template.\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: rse_rules: A list of strings which follow a certain template of ["{protocol}:{rse}:{lifetime}",...]\n With:\n protocol: rucio-catalogue\n rse: An existing Rucio storage element (RSE)\n lifetime: Choose a lifetime of the transfer rule in seconds or None\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: A dictionary with Rucio Storage Elements (RSE) as keys. The value is another dictionary\n with keys \'result\' (0 on success, 1 on failure) and lifetime ( an integer > 0)\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
r_rse_ids = {}
for i_rule in r_rules:
r_rse = i_rule['rse_expression']
r_rse_ids[r_rse] = i_rule['id']
result = {}
for i_rule in rse_rules:
g_ptr = i_rule.split(':')[0]
g_rse = i_rule.split(':')[1]
g_rlt = i_rule.split(':')[2]
if (g_rlt == 'None'):
g_rlt = None
else:
g_rlt = int(g_rlt)
if (g_rse not in list(r_rse_ids.keys())):
continue
options = {}
options['lifetime'] = g_rlt
r = self._rucio.UpdateRule(r_rse_ids[g_rse], options)
result[g_rse] = {}
result[g_rse]['result'] = r
result[g_rse]['lifetime'] = g_rlt
return result<|docstring|>Update existing rules for a Rucio DID or dictionary template.
:param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param: rse_rules: A list of strings which follow a certain template of ["{protocol}:{rse}:{lifetime}",...]
With:
protocol: rucio-catalogue
rse: An existing Rucio storage element (RSE)
lifetime: Choose a lifetime of the transfer rule in seconds or None
:param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return result: A dictionary with Rucio Storage Elements (RSE) as keys. The value is another dictionary
with keys 'result' (0 on success, 1 on failure) and 'lifetime' (an integer > 0, or None)<|endoftext|>
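A sketch of the rse_rules format described above (hypothetical RSE names; rs as in the Whoami sketch above):

from admix.interfaces.rucio_summoner import RucioSummoner  # assumed import path
rs = RucioSummoner()  # constructor arguments are an assumption

rse_rules = [
    'rucio-catalogue:UC_OSG_USERDISK:None',  # "None" clears the expiry at this RSE
    'rucio-catalogue:LNGS_USERDISK:86400',   # one-day lifetime at this RSE
]
result = rs.UpdateRules('user.jdoe:dataset_001', rse_rules=rse_rules)
# e.g. {'UC_OSG_USERDISK': {'result': 0, 'lifetime': None},
#       'LNGS_USERDISK': {'result': 0, 'lifetime': 86400}}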
5f989e1099fc62c1b958c943d3c093c425c1c5dda51ca894dd584a64d59b398c | def _rule_status_dictionary(self):
'This dictionary defines the full set of rule information\n that is returned from Rucio and intended for further use.\n Add information carefully if you need to. Removing anything from\n this dictionary breaks aDMIX.'
rule = {}
rule['rse'] = None
rule['exists'] = False
rule['state'] = 'Unknown'
rule['cnt_ok'] = 0
rule['cnt_repl'] = 0
rule['cnt_stuck'] = 0
rule['id'] = None
rule['expires'] = None
return rule | This dictionary defines the full set of rule information
that is returned from Rucio and intended for further use.
Add information carefully if you need to. Removing anything from
this dictionary breaks aDMIX. | admix/interfaces/rucio_summoner.py | _rule_status_dictionary | XENONnT/admix | 2 | python | def _rule_status_dictionary(self):
'This dictionary defines the full set of rule information\n that is returned from Rucio and intended for further use.\n Add information carefully if you need to. Removing anything from\n this dictionary breaks aDMIX.'
rule = {}
rule['rse'] = None
rule['exists'] = False
rule['state'] = 'Unknown'
rule['cnt_ok'] = 0
rule['cnt_repl'] = 0
rule['cnt_stuck'] = 0
rule['id'] = None
rule['expires'] = None
return rule | def _rule_status_dictionary(self):
'This dictionary defines the full set of rule information\n that is returned from Rucio and intended for further use.\n Add information carefully if you need to. Removing anything from\n this dictionary breaks aDMIX.'
rule = {}
rule['rse'] = None
rule['exists'] = False
rule['state'] = 'Unknown'
rule['cnt_ok'] = 0
rule['cnt_repl'] = 0
rule['cnt_stuck'] = 0
rule['id'] = None
rule['expires'] = None
return rule<|docstring|>This dictionary defines the full set of rule information
that is returned from Rucio and intended for further use.
Add information carefully if you need to. Removing anything from
this dictionary breaks aDMIX.<|endoftext|> |
f1db240755c2abc1a0dda8e4cdd51b3d75514326efcb13f4dffb0cbc9b19f0a0 | def ListDidRules(self, upload_structure=None, level=(- 1)):
'List existing rules for a Rucio DID or dictionary template.\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return: A list of Rucio transfer rules with additional rule information. Each list element stands for a\n Rucio Storage Element (RSE). If no rule exists it returns an empty list\n\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
return r_rules | List existing rules for a Rucio DID or dictionary template.
:param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return: A list of Rucio transfer rules with additional rule information. Each list element stands for a
Rucio Storage Element (RSE). If no rule exists, an empty list is returned. | admix/interfaces/rucio_summoner.py | ListDidRules | XENONnT/admix | 2 | python | def ListDidRules(self, upload_structure=None, level=(- 1)):
'List existing rules for a Rucio DID or dictionary template.\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return: A list of Rucio transfer rules with additional rule information. Each list element stands for a\n Rucio Storage Element (RSE). If no rule exists it returns an empty list\n\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
return r_rules | def ListDidRules(self, upload_structure=None, level=(- 1)):
'List existing rules for a Rucio DID or dictionary template.\n\n :param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return: A list of Rucio transfer rules with additional rule information. Each list element stands for a\n Rucio Storage Element (RSE). If no rule exists it returns an empty list\n\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
return r_rules<|docstring|>List existing rules for a Rucio DID or dictionary template.
:param: upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param: level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return: A list of Rucio transfer rules with additional rule information. Each list element stands for a
Rucio Storage Element (RSE). If no rule exists, an empty list is returned.<|endoftext|>
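A short usage sketch (hypothetical DID; rs as in the Whoami sketch above):

from admix.interfaces.rucio_summoner import RucioSummoner  # assumed import path
rs = RucioSummoner()  # constructor arguments are an assumption

for rule in rs.ListDidRules('user.jdoe:dataset_001'):
    # 'rse_expression' and 'id' are the rule fields used elsewhere in this class
    print(rule['rse_expression'], rule['id'])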
5eecbeebe07c11a819a06ea46113bc9f7f88251f9b887e500b87a99bc473443e | def ListFileReplicas(self, upload_structure=None, rse=None, level=(- 1), localpath=False):
'Function: ListFileReplicas(...)\n\n List all your file replicas which are attached to a dataset or container.\n\n Hint: List of RSE wide file replicas (local path) was not available in Rucio 1.19.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param rse: A valid Rucio Storage Element (RSE) of the current Rucio setting.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: Dictionary which with key->value ordering follows:\n\n - key: filename of the attached file\n\n - value: The local file location for the selected RSE\n\n Otherwise: {}\n\n '
result = {}
file_list = [i_file['name'] for i_file in self.ListFiles(upload_structure, level)]
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
list_rse = [i_rse['rse'] for i_rse in list(self._rucio.ListRSEs())]
if (rse not in list_rse):
return result
rse_overview = self._rucio.GetRSE(rse)
_istape = False
if (rse_overview.get('rse_type') == 'DISK'):
_istape = False
else:
_istape = True
rse_hostname = rse_overview['protocols'][0]['hostname']
rse_prefix = rse_overview['protocols'][0]['prefix']
rse_port = rse_overview['protocols'][0]['port']
rse_scheme = rse_overview['protocols'][0]['scheme']
lfn = None
lfn_disk = '{protocol}://{hostname}:{port}{prefix}/{scope}/{h1}/{h2}/{fname}'
lfn_local_disk = '{prefix}/{scope}/{h1}/{h2}/{fname}'
lfn_tape = '{protocol}://{hostname}:{port}{prefix}/{scope}/{fname}'
if (_istape == False):
for i_filename in file_list:
rucio_did = '{scope}:{name}'.format(scope=val_scope, name=i_filename)
t1 = self._md5_hash(rucio_did)[0:2]
t2 = self._md5_hash(rucio_did)[2:4]
if localpath:
lfn = lfn_local_disk.format(prefix=rse_prefix, scope=val_scope, h1=t1, h2=t2, fname=i_filename)
result[i_filename] = lfn
else:
lfn = lfn_disk.format(protocol=rse_scheme, hostname=rse_hostname, port=rse_port, prefix=rse_prefix, scope=val_scope, h1=t1, h2=t2, fname=i_filename)
result[i_filename] = lfn
else:
for i_filename in file_list:
lfn = lfn_tape.format(protocol=rse_scheme, hostname=rse_hostname, port=rse_port, prefix=rse_prefix, scope=val_scope, fname=i_filename)
result[i_filename] = lfn
return result | Function: ListFileReplicas(...)
List all your file replicas which are attached to a dataset or container.
Hint: Listing RSE-wide file replicas (local paths) was not available in Rucio 1.19.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param rse: A valid Rucio Storage Element (RSE) of the current Rucio setting.
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return result: A dictionary whose key->value pairs are:
- key: filename of the attached file
- value: The local file location for the selected RSE
Otherwise: {} | admix/interfaces/rucio_summoner.py | ListFileReplicas | XENONnT/admix | 2 | python | def ListFileReplicas(self, upload_structure=None, rse=None, level=(- 1), localpath=False):
'Function: ListFileReplicas(...)\n\n List all your file replicas which are attached to a dataset or container.\n\n Hint: List of RSE wide file replicas (local path) was not available in Rucio 1.19.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param rse: A valid Rucio Storage Element (RSE) of the current Rucio setting.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: Dictionary which with key->value ordering follows:\n\n - key: filename of the attached file\n\n - value: The local file location for the selected RSE\n\n Otherwise: {}\n\n '
result = {}
file_list = [i_file['name'] for i_file in self.ListFiles(upload_structure, level)]
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
list_rse = [i_rse['rse'] for i_rse in list(self._rucio.ListRSEs())]
if (rse not in list_rse):
return result
rse_overview = self._rucio.GetRSE(rse)
_istape = False
if (rse_overview.get('rse_type') == 'DISK'):
_istape = False
else:
_istape = True
rse_hostname = rse_overview['protocols'][0]['hostname']
rse_prefix = rse_overview['protocols'][0]['prefix']
rse_port = rse_overview['protocols'][0]['port']
rse_scheme = rse_overview['protocols'][0]['scheme']
lfn = None
lfn_disk = '{protocol}://{hostname}:{port}{prefix}/{scope}/{h1}/{h2}/{fname}'
lfn_local_disk = '{prefix}/{scope}/{h1}/{h2}/{fname}'
lfn_tape = '{protocol}://{hostname}:{port}{prefix}/{scope}/{fname}'
if (_istape == False):
for i_filename in file_list:
rucio_did = '{scope}:{name}'.format(scope=val_scope, name=i_filename)
t1 = self._md5_hash(rucio_did)[0:2]
t2 = self._md5_hash(rucio_did)[2:4]
if localpath:
lfn = lfn_local_disk.format(prefix=rse_prefix, scope=val_scope, h1=t1, h2=t2, fname=i_filename)
result[i_filename] = lfn
else:
lfn = lfn_disk.format(protocol=rse_scheme, hostname=rse_hostname, port=rse_port, prefix=rse_prefix, scope=val_scope, h1=t1, h2=t2, fname=i_filename)
result[i_filename] = lfn
else:
for i_filename in file_list:
lfn = lfn_tape.format(protocol=rse_scheme, hostname=rse_hostname, port=rse_port, prefix=rse_prefix, scope=val_scope, fname=i_filename)
result[i_filename] = lfn
return result | def ListFileReplicas(self, upload_structure=None, rse=None, level=(- 1), localpath=False):
'Function: ListFileReplicas(...)\n\n List all your file replicas which are attached to a dataset or container.\n\n Hint: List of RSE wide file replicas (local path) was not available in Rucio 1.19.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param rse: A valid Rucio Storage Element (RSE) of the current Rucio setting.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: Dictionary which with key->value ordering follows:\n\n - key: filename of the attached file\n\n - value: The local file location for the selected RSE\n\n Otherwise: {}\n\n '
result = {}
file_list = [i_file['name'] for i_file in self.ListFiles(upload_structure, level)]
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
list_rse = [i_rse['rse'] for i_rse in list(self._rucio.ListRSEs())]
if (rse not in list_rse):
return result
rse_overview = self._rucio.GetRSE(rse)
_istape = False
if (rse_overview.get('rse_type') == 'DISK'):
_istape = False
else:
_istape = True
rse_hostname = rse_overview['protocols'][0]['hostname']
rse_prefix = rse_overview['protocols'][0]['prefix']
rse_port = rse_overview['protocols'][0]['port']
rse_scheme = rse_overview['protocols'][0]['scheme']
lfn = None
lfn_disk = '{protocol}://{hostname}:{port}{prefix}/{scope}/{h1}/{h2}/{fname}'
lfn_local_disk = '{prefix}/{scope}/{h1}/{h2}/{fname}'
lfn_tape = '{protocol}://{hostname}:{port}{prefix}/{scope}/{fname}'
if (_istape == False):
for i_filename in file_list:
rucio_did = '{scope}:{name}'.format(scope=val_scope, name=i_filename)
t1 = self._md5_hash(rucio_did)[0:2]
t2 = self._md5_hash(rucio_did)[2:4]
if localpath:
lfn = lfn_local_disk.format(prefix=rse_prefix, scope=val_scope, h1=t1, h2=t2, fname=i_filename)
result[i_filename] = lfn
else:
lfn = lfn_disk.format(protocol=rse_scheme, hostname=rse_hostname, port=rse_port, prefix=rse_prefix, scope=val_scope, h1=t1, h2=t2, fname=i_filename)
result[i_filename] = lfn
else:
for i_filename in file_list:
lfn = lfn_tape.format(protocol=rse_scheme, hostname=rse_hostname, port=rse_port, prefix=rse_prefix, scope=val_scope, fname=i_filename)
result[i_filename] = lfn
return result<|docstring|>Function: ListFileReplicas(...)
List all your file replicas which are attached to a dataset or container.
Hint: List of RSE wide file replicas (local path) was not available in Rucio 1.19.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param rse: A valid Rucio Storage Element (RSE) of the current Rucio setting.
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return result: Dictionary whose key->value pairs follow:
- key: filename of the attached file
- value: The local file location for the selected RSE
Otherwise: {}<|endoftext|> |
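The disk LFNs above follow Rucio's deterministic path convention: the DID string "scope:name" is hashed with md5 and the first two hex-digit pairs become subdirectories. A standalone sketch of that scheme, assuming _md5_hash returns the hex digest of the DID string (the helper's body is not shown in this excerpt):

import hashlib

def build_disk_lfn(prefix, scope, fname):
    # Hash "scope:name" and use the first two hex-digit pairs as
    # subdirectories, mirroring the t1/t2 logic in ListFileReplicas above.
    digest = hashlib.md5(f'{scope}:{fname}'.encode('utf-8')).hexdigest()
    return f'{prefix}/{scope}/{digest[0:2]}/{digest[2:4]}/{fname}'

print(build_disk_lfn('/dcache/xenon', 'raw_records', '000000'))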
b14105686a4789348f766e8a6b1f133f0210bca35380b715d5536785402a5c7d | def ListFiles(self, upload_structure=None, long=None, level=(- 1)):
'List existing files for a Rucio DID or dictionary template.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :param long: Enable a more detailed output (see the Rucio tutorials)\n :return result: A list of files, otherwise []\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
result = []
result = self._rucio.ListFiles(val_scope, val_dname, long=long)
return result | List existing files for a Rucio DID or dictionary template.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:param long: Enable a more detailed output (see the Rucio tutorials)
:return result: A list of files, otherwise [] | admix/interfaces/rucio_summoner.py | ListFiles | XENONnT/admix | 2 | python | def ListFiles(self, upload_structure=None, long=None, level=(- 1)):
'List existing files for a Rucio DID or dictionary template.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :param long: Enable a more detailed output (see the Rucio tutorials)\n :return result: A list of files, otherwise []\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
result = []
result = self._rucio.ListFiles(val_scope, val_dname, long=long)
return result | def ListFiles(self, upload_structure=None, long=None, level=(- 1)):
'List existing files for a Rucio DID or dictionary template.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :param long: Enable a more detailed output (see the Rucio tutorials)\n :return result: A list of files, otherwise []\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
result = []
result = self._rucio.ListFiles(val_scope, val_dname, long=long)
return result<|docstring|>List existing files for a Rucio DID or dictionary template.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:param long: Enable a more detailed output (see the Rucio tutorials)
:return result: A list of files, otherwise []<|endoftext|> |
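A hypothetical usage sketch for ListFiles; it assumes the enclosing class is RucioSummoner (suggested by the module path) and that it can be constructed without arguments, neither of which is confirmed by this excerpt:

from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
# A plain Rucio DID string works as the upload_structure argument; the
# returned entries carry a 'name' key, as used by ListFileReplicas above.
for f in rs.ListFiles('user.jdoe:calibration_data_day1'):
    print(f['name'])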
a3a17f5e878e6f8039825bbcb8134de7e32938b76fbb1f4595f2d61bd32fde1a | def ListDids(self, scope, filters, type='collection', long=False, recursive=False):
"\n List all data identifiers in a scope which match a given pattern. Check Rucio github page for details\n\n :param scope: The valid string which follows the Rucio scope name.\n :param filters: A dictionary of key/value pairs like {'name': 'file_name','rse-expression': 'tier0'}.\n :param type: The type of the did: 'all'(container, dataset or file)|'collection'(dataset or container)|'dataset'|'container'|'file'\n :param long: Long format option to display more information for each DID.\n :param result: Recursively list DIDs content.\n "
result = []
result = self._rucio.ListDids(scope=scope, filters=filters, type=type, long=long, recursive=recursive)
return result | List all data identifiers in a scope which match a given pattern. Check Rucio github page for details
:param scope: The valid string which follows the Rucio scope name.
:param filters: A dictionary of key/value pairs like {'name': 'file_name','rse-expression': 'tier0'}.
:param type: The type of the did: 'all'(container, dataset or file)|'collection'(dataset or container)|'dataset'|'container'|'file'
:param long: Long format option to display more information for each DID.
:param recursive: Recursively list DID contents. | admix/interfaces/rucio_summoner.py | ListDids | XENONnT/admix | 2 | python | def ListDids(self, scope, filters, type='collection', long=False, recursive=False):
"\n List all data identifiers in a scope which match a given pattern. Check Rucio github page for details\n\n :param scope: The valid string which follows the Rucio scope name.\n :param filters: A dictionary of key/value pairs like {'name': 'file_name','rse-expression': 'tier0'}.\n :param type: The type of the did: 'all'(container, dataset or file)|'collection'(dataset or container)|'dataset'|'container'|'file'\n :param long: Long format option to display more information for each DID.\n :param result: Recursively list DIDs content.\n "
result = []
result = self._rucio.ListDids(scope=scope, filters=filters, type=type, long=long, recursive=recursive)
return result | def ListDids(self, scope, filters, type='collection', long=False, recursive=False):
"\n List all data identifiers in a scope which match a given pattern. Check Rucio github page for details\n\n :param scope: The valid string which follows the Rucio scope name.\n :param filters: A dictionary of key/value pairs like {'name': 'file_name','rse-expression': 'tier0'}.\n :param type: The type of the did: 'all'(container, dataset or file)|'collection'(dataset or container)|'dataset'|'container'|'file'\n :param long: Long format option to display more information for each DID.\n :param result: Recursively list DIDs content.\n "
result = []
result = self._rucio.ListDids(scope=scope, filters=filters, type=type, long=long, recursive=recursive)
return result<|docstring|>List all data identifiers in a scope which match a given pattern. Check Rucio github page for details
:param scope: The valid string which follows the Rucio scope name.
:param filters: A dictionary of key/value pairs like {'name': 'file_name','rse-expression': 'tier0'}.
:param type: The type of the did: 'all'(container, dataset or file)|'collection'(dataset or container)|'dataset'|'container'|'file'
:param long: Long format option to display more information for each DID.
:param recursive: Recursively list DID contents.<|endoftext|>
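A hypothetical usage sketch for ListDids (the RucioSummoner class name and no-argument constructor are assumptions based on the module path; the scope and filter values are placeholders):

from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
# Wildcards in the 'name' filter follow the usual Rucio pattern syntax.
for did in rs.ListDids(scope='user.jdoe',
                       filters={'name': 'calibration_*'},
                       type='dataset'):
    print(did)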
f06ade64b95645addb1bd60d6f99d0d3119f2d0c5b2693e38fabdbecaa7cbc66 | def GetRule(self, upload_structure=None, rse=None, level=(- 1)):
'This function checks whether a rule for the requested upload destination\n already exists in Rucio for a given upload structure or Rucio DID and\n returns a standardized dictionary.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param rse: A valid Rucio Storage Element (RSE)\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return rule: A dictionary of pre-defined rule information.\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
if (len(r_rules) == 0):
return self._rule_status_dictionary()
rule = self._rule_status_dictionary()
for i_rule in r_rules:
if ((rse != None) and (i_rule['rse_expression'] != rse)):
continue
rule['rse'] = i_rule['rse_expression']
rule['exists'] = True
rule['state'] = i_rule['state']
rule['cnt_ok'] = i_rule['locks_ok_cnt']
rule['cnt_repl'] = i_rule['locks_replicating_cnt']
rule['cnt_stuck'] = i_rule['locks_stuck_cnt']
rule['id'] = i_rule['id']
if (i_rule['expires_at'] == None):
rule['expires'] = None
else:
rule['expires'] = i_rule['expires_at'].strftime('%Y-%m-%d-%H:%M:%S')
return rule | This function checks whether a rule for the requested upload destination
already exists in Rucio for a given upload structure or Rucio DID and
returns a standardized dictionary.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param rse: A valid Rucio Storage Element (RSE)
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return rule: A dictionary of pre-defined rule information. | admix/interfaces/rucio_summoner.py | GetRule | XENONnT/admix | 2 | python | def GetRule(self, upload_structure=None, rse=None, level=(- 1)):
'This function checks whether a rule for the requested upload destination\n already exists in Rucio for a given upload structure or Rucio DID and\n returns a standardized dictionary.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param rse: A valid Rucio Storage Element (RSE)\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return rule: A dictionary of pre-defined rule information.\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
if (len(r_rules) == 0):
return self._rule_status_dictionary()
rule = self._rule_status_dictionary()
for i_rule in r_rules:
if ((rse != None) and (i_rule['rse_expression'] != rse)):
continue
rule['rse'] = i_rule['rse_expression']
rule['exists'] = True
rule['state'] = i_rule['state']
rule['cnt_ok'] = i_rule['locks_ok_cnt']
rule['cnt_repl'] = i_rule['locks_replicating_cnt']
rule['cnt_stuck'] = i_rule['locks_stuck_cnt']
rule['id'] = i_rule['id']
if (i_rule['expires_at'] == None):
rule['expires'] = None
else:
rule['expires'] = i_rule['expires_at'].strftime('%Y-%m-%d-%H:%M:%S')
return rule | def GetRule(self, upload_structure=None, rse=None, level=(- 1)):
'This function checks whether a rule for the requested upload destination\n already exists in Rucio for a given upload structure or Rucio DID and\n returns a standardized dictionary.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param rse: A valid Rucio Storage Element (RSE)\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return rule: A dictionary of pre-defined rule information.\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
r_rules = self._rucio.ListDidRules(val_scope, val_dname)
if (len(r_rules) == 0):
return self._rule_status_dictionary()
rule = self._rule_status_dictionary()
for i_rule in r_rules:
if ((rse != None) and (i_rule['rse_expression'] != rse)):
continue
rule['rse'] = i_rule['rse_expression']
rule['exists'] = True
rule['state'] = i_rule['state']
rule['cnt_ok'] = i_rule['locks_ok_cnt']
rule['cnt_repl'] = i_rule['locks_replicating_cnt']
rule['cnt_stuck'] = i_rule['locks_stuck_cnt']
rule['id'] = i_rule['id']
if (i_rule['expires_at'] == None):
rule['expires'] = None
else:
rule['expires'] = i_rule['expires_at'].strftime('%Y-%m-%d-%H:%M:%S')
return rule<|docstring|>This function checks whether a rule for the requested upload destination
already exists in Rucio for a given upload structure or Rucio DID and
returns a standardized dictionary.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param rse: A valid Rucio Storage Element (RSE)
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return rule: A dictionary of pre-defined rule information.<|endoftext|>
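A hypothetical usage sketch for GetRule, built on the dictionary fields filled in above (class name, constructor, DID, and RSE values are placeholder assumptions):

from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
rule = rs.GetRule('user.jdoe:calibration_data_day1', rse='UC_OSG_USERDISK')
if rule['exists']:
    # locks that are done, still replicating, or stuck:
    print(rule['state'], rule['cnt_ok'], rule['cnt_repl'], rule['cnt_stuck'])
else:
    print('No rule for this RSE')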
8b5343d87d81f852a463b768f71cc3c2efa75468e963f282e52c79790b9d3d1a | def CheckRule(self, upload_structure=None, rse=None, level=(- 1)):
'Check the status message for a Rucio DID or dictionary template rule.\n This is a shortcut in combination with the member function GetRule()\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary. The member\n function GetRule(...) evaluates the upload_structure variable.\n :param rse: Specify a valid Rucio Storage Element (RSE)\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return r_status: Rucio rule status: OK, STUCK or REPLICATING. If the rule does not exist, returns NoRule\n '
if (rse == None):
return 'NoRule'
rule = self.GetRule(upload_structure, rse, level)
r_status = None
if (rule['exists'] == False):
r_status = 'NoRule'
elif ((rule['exists'] == True) and (rule['state'] == 'OK') and (rule['cnt_ok'] > 0) and (rule['cnt_repl'] == 0) and (rule['cnt_stuck'] == 0)):
r_status = 'OK'
elif ((rule['exists'] == True) and (rule['state'] == 'REPLICATING')):
r_status = 'REPLICATING'
elif ((rule['exists'] == True) and (rule['state'] == 'STUCK')):
r_status = 'STUCK'
return r_status | Check the status message for a Rucio DID or dictionary template rule.
This is a shortcut in combination with the member function GetRule()
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary. The member
function GetRule(...) evaluates the upload_structure variable.
:param rse: Specify a valid Rucio Storage Element (RSE)
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return r_status: Rucio rule status: OK, STUCK or REPLICATING. If the rule does not exist, returns NoRule | admix/interfaces/rucio_summoner.py | CheckRule | XENONnT/admix | 2 | python | def CheckRule(self, upload_structure=None, rse=None, level=(- 1)):
'Check the status message for a Rucio DID or dictionary template rule.\n This is a shortcut in combination with the member function GetRule()\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary. The member\n function GetRule(...) evaluates the upload_structure variable.\n :param rse: Specify a valid Rucio Storage Element (RSE)\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return r_status: Rucio rule status: OK, STUCK or REPLICATING. If the rule does not exist, returns NoRule\n '
if (rse == None):
return 'NoRule'
rule = self.GetRule(upload_structure, rse, level)
r_status = None
if (rule['exists'] == False):
r_status = 'NoRule'
elif ((rule['exists'] == True) and (rule['state'] == 'OK') and (rule['cnt_ok'] > 0) and (rule['cnt_repl'] == 0) and (rule['cnt_stuck'] == 0)):
r_status = 'OK'
elif ((rule['exists'] == True) and (rule['state'] == 'REPLICATING')):
r_status = 'REPLICATING'
elif ((rule['exists'] == True) and (rule['state'] == 'STUCK')):
r_status = 'STUCK'
return r_status | def CheckRule(self, upload_structure=None, rse=None, level=(- 1)):
'Check the status message for a Rucio DID or dictionary template rule.\n This is a shortcut in combination with the member function GetRule()\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary. The member\n function GetRule(...) evaluates the upload_structure variable.\n :param rse: Specify a valid Rucio Storage Element (RSE)\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return r_status: Rucio rule status: OK, STUCK or REPLICATING. If the rule does not exist, returns NoRule\n '
if (rse == None):
return 'NoRule'
rule = self.GetRule(upload_structure, rse, level)
r_status = None
if (rule['exists'] == False):
r_status = 'NoRule'
elif ((rule['exists'] == True) and (rule['state'] == 'OK') and (rule['cnt_ok'] > 0) and (rule['cnt_repl'] == 0) and (rule['cnt_stuck'] == 0)):
r_status = 'OK'
elif ((rule['exists'] == True) and (rule['state'] == 'REPLICATING')):
r_status = 'REPLICATING'
elif ((rule['exists'] == True) and (rule['state'] == 'STUCK')):
r_status = 'STUCK'
return r_status<|docstring|>Check the status message for a Rucio DID or dictionary template rule.
This is a shortcut in combination with the member function GetRule()
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary. The member
function GetRule(...) evaluates the upload_structure variable.
:param rse: Specify a valid Rucio Storage Element (RSE)
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return r_status: Rucio rule status: OK, STUCK or REPLICATING. If the rule does not exist, returns NoRule<|endoftext|>
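A hypothetical polling sketch around the four status strings CheckRule returns (class name, constructor, DID, and RSE values are placeholder assumptions):

import time
from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
while True:
    status = rs.CheckRule('user.jdoe:calibration_data_day1', rse='UC_OSG_USERDISK')
    if status in ('OK', 'STUCK', 'NoRule'):
        break
    time.sleep(60)  # still REPLICATING; poll again later
print(status)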
fc30c75cdc779ecf806d532821e7284508acb6677340543154e997fb6feefba9 | def VerifyLocations(self, upload_structure=None, upload_path=None, checksum_test=False, level=(- 1)):
'This function checks whether the files attached to a given upload structure\n or Rucio DID match the files found in the given upload path and returns\n the differences.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param upload_path: A path which holds the files for the Rucio upload\n :param checksum_test: Enable extended checksum test with True.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return (check_success, diff_rucio, diff_disk): check_success is True if the same files exist in Rucio\n as on disk, otherwise False.\n diff_rucio returns a list of files which are in Rucio but not\n on disk.\n diff_disk returns a list of files which are on disk but not\n attached to the Rucio DID.\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
list_folder_phys = []
list_dirpath_phys = []
list_files_phys = []
for (dirpath, dirnames, filenames) in os.walk(upload_path):
list_dirpath_phys.extend(dirpath)
list_folder_phys.extend(dirnames)
list_files_phys.extend(filenames)
break
nb_files_phys = len(list_files_phys)
list_files_rucio = self._rucio.ListContent(val_scope, val_dname)
rucio_files = []
for i_file in list_files_rucio:
rucio_files.append(i_file['name'])
nb_files_rucio = len(rucio_files)
diff_rucio = list((set(rucio_files) - set(list_files_phys)))
diff_disk = list((set(list_files_phys) - set(rucio_files)))
if (checksum_test == True):
print('Implement a checksum test')
pass
if (nb_files_phys == nb_files_rucio):
return (True, diff_rucio, diff_disk)
else:
return (False, diff_rucio, diff_disk) | This function checks whether the files attached to a given upload structure
or Rucio DID match the files found in the given upload path and returns
the differences.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param upload_path: A path which holds the files for the Rucio upload
:param checksum_test: Enable extended checksum test with True.
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return (check_success, diff_rucio, diff_disk): check_success is True if the same files exist in Rucio
as on disk, otherwise False.
diff_rucio returns a list of files which are in Rucio but not
on disk.
diff_disk returns a list of files which are on disk but not
attached to the Rucio DID. | admix/interfaces/rucio_summoner.py | VerifyLocations | XENONnT/admix | 2 | python | def VerifyLocations(self, upload_structure=None, upload_path=None, checksum_test=False, level=(- 1)):
'This function checks whether the files attached to a given upload structure\n or Rucio DID match the files found in the given upload path and returns\n the differences.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param upload_path: A path which holds the files for the Rucio upload\n :param checksum_test: Enable extended checksum test with True.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return (check_success, diff_rucio, diff_disk): check_success is True if the same files exist in Rucio\n as on disk, otherwise False.\n diff_rucio returns a list of files which are in Rucio but not\n on disk.\n diff_disk returns a list of files which are on disk but not\n attached to the Rucio DID.\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
list_folder_phys = []
list_dirpath_phys = []
list_files_phys = []
for (dirpath, dirnames, filenames) in os.walk(upload_path):
list_dirpath_phys.extend(dirpath)
list_folder_phys.extend(dirnames)
list_files_phys.extend(filenames)
break
nb_files_phys = len(list_files_phys)
list_files_rucio = self._rucio.ListContent(val_scope, val_dname)
rucio_files = []
for i_file in list_files_rucio:
rucio_files.append(i_file['name'])
nb_files_rucio = len(rucio_files)
diff_rucio = list((set(rucio_files) - set(list_files_phys)))
diff_disk = list((set(list_files_phys) - set(rucio_files)))
if (checksum_test == True):
print('Implement a checksum test')
pass
if (nb_files_phys == nb_files_rucio):
return (True, diff_rucio, diff_disk)
else:
return (False, diff_rucio, diff_disk) | def VerifyLocations(self, upload_structure=None, upload_path=None, checksum_test=False, level=(- 1)):
'This function checks whether the files attached to a given upload structure\n or Rucio DID match the files found in the given upload path and returns\n the differences.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param upload_path: A path which holds the files for the Rucio upload\n :param checksum_test: Enable extended checksum test with True.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return (check_success, diff_rucio, diff_disk): check_success is True if the same files exist in Rucio\n as on disk, otherwise False.\n diff_rucio returns a list of files which are in Rucio but not\n on disk.\n diff_disk returns a list of files which are on disk but not\n attached to the Rucio DID.\n '
(val_scope, val_dname) = self._VerifyStructure(upload_structure, level)
list_folder_phys = []
list_dirpath_phys = []
list_files_phys = []
for (dirpath, dirnames, filenames) in os.walk(upload_path):
list_dirpath_phys.extend(dirpath)
list_folder_phys.extend(dirnames)
list_files_phys.extend(filenames)
break
nb_files_phys = len(list_files_phys)
list_files_rucio = self._rucio.ListContent(val_scope, val_dname)
rucio_files = []
for i_file in list_files_rucio:
rucio_files.append(i_file['name'])
nb_files_rucio = len(rucio_files)
diff_rucio = list((set(rucio_files) - set(list_files_phys)))
diff_disk = list((set(list_files_phys) - set(rucio_files)))
if (checksum_test == True):
print('Implement a checksum test')
pass
if (nb_files_phys == nb_files_rucio):
return (True, diff_rucio, diff_disk)
else:
return (False, diff_rucio, diff_disk)<|docstring|>This function checks whether the files attached to a given upload structure
or Rucio DID match the files found in the given upload path and returns
the differences.
:param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary
:param upload_path: A path which holds the files for the Rucio upload
:param checksum_test: Enable extended checksum test with True.
:param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at
which the 'did' is chosen from.
:return (check_success, diff_rucio, diff_disk): check_success is True if the same files exist in Rucio
as on disk, otherwise False.
diff_rucio returns a list of files which are in Rucio but not
on disk.
diff_disk returns a list of files which are on disk but not
attached to the Rucio DID.<|endoftext|> |
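The comparison above reduces to two set differences between the file names attached in Rucio and the file names found on disk; a standalone sketch with placeholder file names:

rucio_files = {'000000', '000001', 'metadata.json'}
disk_files = {'000000', 'metadata.json', 'stray_file'}
diff_rucio = list(rucio_files - disk_files)  # in Rucio, missing on disk
diff_disk = list(disk_files - rucio_files)   # on disk, not attached in Rucio
check_success = (len(rucio_files) == len(disk_files))  # the count check used above
print(check_success, diff_rucio, diff_disk)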
3228ca8599ac17f2ec95220998820986b72013c68abf01467aff474e4b68dacb | def DownloadDids(self, dids=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=3, trace_custom_fields={}):
'Function: DownloadDids(...)\n\n This function offers to download a list of Rucio DIDs.\n\n :param dids: A string or a list of strings which follows the Rucio DID naming convention\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard three (3) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n '
if (dids == None):
return 1
if isinstance(dids, str):
dids = [dids]
dw = []
for i_did in dids:
dw_dict = {}
dw_dict['did'] = i_did
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
dw.append(dw_dict)
result = self._rucio.DownloadDids(items=dw, num_threads=num_threads, trace_custom_fields=trace_custom_fields)
return result | Function: DownloadDids(...)
This function offers to download a list of Rucio DIDs.
:param dids: A string or a list of strings which follows the Rucio DID naming convention
:param download_path: Path to store the downloaded data
:param rse: Specify the RSE from where the data are going to be downloaded
:param no_subdir: True if no sub directory is going to be created.
:param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)
:param num_threads: Standard three (3) threads are used for downloading on a CPU (optional)
:param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)
:return result: A list of Rucio download information as a list of dictionaries. If it fails 1 | admix/interfaces/rucio_summoner.py | DownloadDids | XENONnT/admix | 2 | python | def DownloadDids(self, dids=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=3, trace_custom_fields={}):
'Function: DownloadDids(...)\n\n This function offers to download a list of Rucio DIDs.\n\n :param dids: A string or a list of strings which follows the Rucio DID naming convention\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard three (3) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n '
if (dids == None):
return 1
if isinstance(dids, str):
dids = [dids]
dw = []
for i_did in dids:
dw_dict = {}
dw_dict['did'] = i_did
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
dw.append(dw_dict)
result = self._rucio.DownloadDids(items=dw, num_threads=num_threads, trace_custom_fields=trace_custom_fields)
return result | def DownloadDids(self, dids=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=3, trace_custom_fields={}):
'Function: DownloadDids(...)\n\n This function offers to download a list of Rucio DIDs.\n\n :param dids: A string or a list of strings which follows the Rucio DID naming convention\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard three (3) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n '
if (dids == None):
return 1
if isinstance(dids, str):
dids = [dids]
dw = []
for i_did in dids:
dw_dict = {}
dw_dict['did'] = i_did
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
dw.append(dw_dict)
result = self._rucio.DownloadDids(items=dw, num_threads=num_threads, trace_custom_fields=trace_custom_fields)
return result<|docstring|>Function: DownloadDids(...)
This function offers to download a list of Rucio DIDs.
:param dids: A string or a list of strings which follows the Rucio DID naming convention
:param download_path: Path to store the downloaded data
:param rse: Specify the RSE from where the data are going to be downloaded
:param no_subdir: True if no sub directory is going to be created.
:param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)
:param num_threads: Standard three (3) threads are used for downloading on a CPU (optional)
:param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)
:return result: A list of Rucio download information as a list of dictionaries. If it fails 1<|endoftext|> |
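A hypothetical usage sketch for DownloadDids (class name, constructor, DID, path, and RSE values are placeholder assumptions):

from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
report = rs.DownloadDids(dids=['user.jdoe:calibration_data_day1'],
                         download_path='/tmp/rucio_dl',
                         rse='UC_OSG_USERDISK')
print(report)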
f6dcfae0cf1923050819319e0de9dccfe32180cacba6028992dfec0dd7b45d1e | def DownloadChunks(self, download_structure=None, chunks=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=2, trace_custom_fields={}, level=(- 1)):
"Function: DownloadChunks(...)\n\n This function offers to download specific chunks from Rucio a specific DiD.\n Warning: This function is heavily made for XENON internal structures. Please use with care.\n\n :param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param chunks: A list (strings) chunk numbers (Format: XXXXXX (six digits), or metadata.json:\n Example: list=['000000', '000001', 'metadata.json']\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize download, see Rucio tutorials optional)\n :param level: Specify the download DID from a template dictionary if it is hand over (optional, the last level\n is chosen)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n "
(val_scope, val_dname) = self._VerifyStructure(download_structure, level)
dw = []
for i_chunk in chunks:
dw_dict = {}
dw_dict['did'] = '{0}:{1}-{2}'.format(val_scope, val_dname, i_chunk)
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
dw.append(dw_dict)
result = self._rucio.DownloadDids(dw, num_threads, trace_custom_fields)
return result | Function: DownloadChunks(...)
This function offers to download specific chunks of a specific Rucio DID.
Warning: This function is tailored to XENON-internal structures. Please use with care.
:param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID
:param download_path: Path to store the downloaded data
:param rse: Specify the RSE from where the data are going to be downloaded
:param chunks: A list of chunk numbers (strings; format: XXXXXX, six digits) or metadata.json:
Example: list=['000000', '000001', 'metadata.json']
:param no_subdir: True if no sub directory is going to be created.
:param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)
:param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)
:param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)
:param level: Specify the download DID from a template dictionary if one is handed over (optional, the last level
is chosen)
:return result: A list of Rucio download information as a list of dictionaries. If it fails 1 | admix/interfaces/rucio_summoner.py | DownloadChunks | XENONnT/admix | 2 | python | def DownloadChunks(self, download_structure=None, chunks=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=2, trace_custom_fields={}, level=(- 1)):
"Function: DownloadChunks(...)\n\n This function offers to download specific chunks from Rucio a specific DiD.\n Warning: This function is heavily made for XENON internal structures. Please use with care.\n\n :param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param chunks: A list (strings) chunk numbers (Format: XXXXXX (six digits), or metadata.json:\n Example: list=['000000', '000001', 'metadata.json']\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize download, see Rucio tutorials optional)\n :param level: Specify the download DID from a template dictionary if it is hand over (optional, the last level\n is chosen)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n "
(val_scope, val_dname) = self._VerifyStructure(download_structure, level)
dw = []
for i_chunk in chunks:
dw_dict = {}
dw_dict['did'] = '{0}:{1}-{2}'.format(val_scope, val_dname, i_chunk)
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
dw.append(dw_dict)
result = self._rucio.DownloadDids(dw, num_threads, trace_custom_fields)
return result | def DownloadChunks(self, download_structure=None, chunks=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=2, trace_custom_fields={}, level=(- 1)):
"Function: DownloadChunks(...)\n\n This function offers to download specific chunks from Rucio a specific DiD.\n Warning: This function is heavily made for XENON internal structures. Please use with care.\n\n :param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param chunks: A list (strings) chunk numbers (Format: XXXXXX (six digits), or metadata.json:\n Example: list=['000000', '000001', 'metadata.json']\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize download, see Rucio tutorials optional)\n :param level: Specify the download DID from a template dictionary if it is hand over (optional, the last level\n is chosen)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n "
(val_scope, val_dname) = self._VerifyStructure(download_structure, level)
dw = []
for i_chunk in chunks:
dw_dict = {}
dw_dict['did'] = '{0}:{1}-{2}'.format(val_scope, val_dname, i_chunk)
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
dw.append(dw_dict)
result = self._rucio.DownloadDids(dw, num_threads, trace_custom_fields)
return result<|docstring|>Function: DownloadChunks(...)
This function offers to download specific chunks of a specific Rucio DID.
Warning: This function is tailored to XENON-internal structures. Please use with care.
:param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID
:param download_path: Path to store the downloaded data
:param rse: Specify the RSE from where the data are going to be downloaded
:param chunks: A list of chunk numbers (strings; format: XXXXXX, six digits) or metadata.json:
Example: list=['000000', '000001', 'metadata.json']
:param no_subdir: True if no sub directory is going to be created.
:param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)
:param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)
:param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)
:param level: Specify the download DID from a template dictionary if one is handed over (optional, the last level
is chosen)
:return result: A list of Rucio download information as a list of dictionaries. If it fails 1<|endoftext|> |
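A hypothetical usage sketch for DownloadChunks; note that each chunk is resolved to the DID '<scope>:<name>-<chunk>' by the code above (class name, constructor, and all values are placeholder assumptions):

from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
report = rs.DownloadChunks(download_structure='user.jdoe:raw_records',
                           chunks=['000000', '000001', 'metadata.json'],
                           download_path='/tmp/rucio_dl',
                           rse='UC_OSG_USERDISK')
print(report)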
b56785c88c41fd43045099c02ed02e1a99ddcba2485a02fc5c7a6b48a225e57f | def Download(self, download_structure=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=2, trace_custom_fields={}, level=(- 1)):
'Function: Download(...)\n\n This function offers to download a specific Rucio DID.\n\n :param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)\n :param level: Specify the download DID from a template dictionary if one is handed over (optional, the last level\n is chosen)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n '
(val_scope, val_dname) = self._VerifyStructure(download_structure, level)
dw_dict = {}
dw_dict['did'] = '{0}:{1}'.format(val_scope, val_dname)
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
result = self._rucio.DownloadDids(dw_dict, num_threads, trace_custom_fields)
return result | Function: Download(...)
This function offers to download a specific Rucio DID.
:param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID
:param download_path: Path to store the downloaded data
:param rse: Specify the RSE from where the data are going to be downloaded
:param no_subdir: True if no sub directory is going to be created.
:param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)
:param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)
:param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)
:param level: Specify the download DID from a template dictionary if one is handed over (optional, the last level
is chosen)
:return result: A list of Rucio download information as a list of dictionaries. If it fails 1 | admix/interfaces/rucio_summoner.py | Download | XENONnT/admix | 2 | python | def Download(self, download_structure=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=2, trace_custom_fields={}, level=(- 1)):
'Function: Download(...)\n\n This function offers to download a specific Rucio DID.\n\n :param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)\n :param level: Specify the download DID from a template dictionary if one is handed over (optional, the last level\n is chosen)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n '
(val_scope, val_dname) = self._VerifyStructure(download_structure, level)
dw_dict = {}
dw_dict['did'] = '{0}:{1}'.format(val_scope, val_dname)
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
result = self._rucio.DownloadDids(dw_dict, num_threads, trace_custom_fields)
return result | def Download(self, download_structure=None, download_path='.', rse=None, no_subdir=False, transfer_timeout=None, num_threads=2, trace_custom_fields={}, level=(- 1)):
'Function: Download(...)\n\n This function offers to download a specific Rucio DID.\n\n :param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID\n :param download_path: Path to store the downloaded data\n :param rse: Specify the RSE from where the data are going to be downloaded\n :param no_subdir: True if no sub directory is going to be created.\n :param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)\n :param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)\n :param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)\n :param level: Specify the download DID from a template dictionary if one is handed over (optional, the last level\n is chosen)\n :return result: A list of Rucio download information as a list of dictionaries. If it fails 1\n '
(val_scope, val_dname) = self._VerifyStructure(download_structure, level)
dw_dict = {}
dw_dict['did'] = '{0}:{1}'.format(val_scope, val_dname)
dw_dict['rse'] = rse
dw_dict['base_dir'] = download_path
dw_dict['no_subdir'] = no_subdir
dw_dict['transfer_timeout'] = transfer_timeout
result = self._rucio.DownloadDids(dw_dict, num_threads, trace_custom_fields)
return result<|docstring|>Function: Download(...)
This function offers to download a specific Rucio DID.
:param download_structure: A valid Rucio DID (string) or a template dictionary of an existing DID
:param download_path: Path to store the downloaded data
:param rse: Specify the RSE from where the data are going to be downloaded
:param no_subdir: True if no sub directory is going to be created.
:param transfer_timeout: Wait for so many seconds and try to continue with the download (optional)
:param num_threads: Standard two (2) threads are used for downloading on a CPU (optional)
:param trace_custom_fields: Customize the download (see the Rucio tutorials; optional)
:param level: Specify the download DID from a template dictionary if one is handed over (optional, the last level
is chosen)
:return result: A list of Rucio download information as a list of dictionaries. If it fails 1<|endoftext|> |
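A hypothetical usage sketch for Download, mirroring the dictionary built above (class name, constructor, and values are placeholder assumptions):

from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
report = rs.Download(download_structure='user.jdoe:calibration_data_day1',
                     download_path='/tmp/rucio_dl',
                     no_subdir=True)
print(report)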
7156efcdd263ffeed5a8ac8d47a622a210c5a502982a0487f1ad278788ccea46 | def UploadToScope(self, scope=None, upload_path=None, rse=None, rse_lifetime=None):
"Function: UploadToScope()\n\n Upload a folder to a Rucio scope\n\n :param scope: A string which follows the rules of Rucio string\n :param upload_path: A valid (string) to a folder which holds a file (or files) for upload\n :param rse: A valid Rucio Storage Element (RSE)\n :param rse_lifetime: A valid (int) which defines the lifetime of the transfer rule after upload.\n :return result: (upload_status, rse_rule) means:\n\n * (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule\n\n * (0, 1) for success and no rse_lifetime to the rule\n\n * (1, 1) for upload failure and rse_lifetime is not given\n\n * (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically\n "
result = 1
result_rule = 1
if (isinstance(scope, str) == False):
print('Function UploadToScope() needs a Rucio (str) scope as input')
exit(1)
if (rse == None):
print('No Rucio Storage Element (rse) given.')
exit(1)
if (upload_path == None):
print('No path/file given for upload')
exit(1)
list_files_phys = []
for (dirpath, dirnames, filenames) in os.walk(upload_path):
list_files_phys.extend(filenames)
break
result = self._rucio.CreateScope(account=self.rucio_account, scope=scope)
upload_dict = {}
upload_dict['path'] = (upload_path + '/')
upload_dict['rse'] = rse
upload_dict['did_scope'] = scope
result = self._rucio.Upload(upload_dict=[upload_dict])
if ((rse_lifetime != None) and isinstance(rse_lifetime, int) and (rse_lifetime > 0)):
rule = [f'rucio-catalogue:{rse}:{rse_lifetime}']
result_rule_count = []
for i_file in list_files_phys:
upload_structure = f'{scope}:{i_file}'
result_count = self.UpdateRules(upload_structure=upload_structure, rse_rules=rule)
result_rule_count.append(result_count.get(rse, 1))
k_count = 0
for ik in result_rule_count:
k_count += (ik['result'] if isinstance(ik, dict) else ik)
if (k_count == 0):
result_rule = 0
return (result, result_rule) | Function: UploadToScope()
Upload a folder to a Rucio scope
:param scope: A string which follows the Rucio scope naming rules
:param upload_path: A valid path (string) to a folder which holds a file (or files) for upload
:param rse: A valid Rucio Storage Element (RSE)
:param rse_lifetime: A valid (int) which defines the lifetime of the transfer rule after upload.
:return result: (upload_status, rse_rule) means:
* (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule
* (0, 1) for success and no rse_lifetime to the rule
* (1, 1) for upload failure and rse_lifetime is not given
* (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically | admix/interfaces/rucio_summoner.py | UploadToScope | XENONnT/admix | 2 | python | def UploadToScope(self, scope=None, upload_path=None, rse=None, rse_lifetime=None):
"Function: UploadToScope()\n\n Upload a folder to a Rucio scope\n\n :param scope: A string which follows the rules of Rucio string\n :param upload_path: A valid (string) to a folder which holds a file (or files) for upload\n :param rse: A valid Rucio Storage Element (RSE)\n :param rse_lifetime: A valid (int) which defines the lifetime of the transfer rule after upload.\n :return result: (upload_status, rse_rule) means:\n\n * (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule\n\n * (0, 1) for success and no rse_lifetime to the rule\n\n * (1, 1) for upload failure and rse_lifetime is not given\n\n * (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically\n "
result = 1
result_rule = 1
if (isinstance(scope, str) == False):
print('Function UploadToScope() needs a Rucio (str) scope as input')
exit(1)
if (rse == None):
print('No Rucio Storage Element (rse) given.')
exit(1)
if (upload_path == None):
print('No path/file given for upload')
exit(1)
list_files_phys = []
for (dirpath, dirnames, filenames) in os.walk(upload_path):
list_files_phys.extend(filenames)
break
result = self._rucio.CreateScope(account=self.rucio_account, scope=scope)
upload_dict = {}
upload_dict['path'] = (upload_path + '/')
upload_dict['rse'] = rse
upload_dict['did_scope'] = scope
result = self._rucio.Upload(upload_dict=[upload_dict])
if ((rse_lifetime != None) and isinstance(rse_lifetime, int) and (rse_lifetime > 0)):
rule = [f'rucio-catalogue:{rse}:{rse_lifetime}']
result_rule_count = []
for i_file in list_files_phys:
upload_structure = f'{scope}:{i_file}'
result_count = self.UpdateRules(upload_structure=upload_structure, rse_rules=rule)
result_rule_count.append(result_count.get(rse, 1))
k_count = 0
for ik in result_rule_count:
k_count += (ik['result'] if isinstance(ik, dict) else ik)
if (k_count == 0):
result_rule = 0
return (result, result_rule) | def UploadToScope(self, scope=None, upload_path=None, rse=None, rse_lifetime=None):
"Function: UploadToScope()\n\n Upload a folder to a Rucio scope\n\n :param scope: A string which follows the rules of Rucio string\n :param upload_path: A valid (string) to a folder which holds a file (or files) for upload\n :param rse: A valid Rucio Storage Element (RSE)\n :param rse_lifetime: A valid (int) which defines the lifetime of the transfer rule after upload.\n :return result: (upload_status, rse_rule) means:\n\n * (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule\n\n * (0, 1) for success and no rse_lifetime to the rule\n\n * (1, 1) for upload failure and rse_lifetime is not given\n\n * (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically\n "
result = 1
result_rule = 1
if (isinstance(scope, str) == False):
print('Function UploadToScope() needs a Rucio (str) scope as input')
exit(1)
if (rse == None):
print('No Rucio Storage Element (rse) given.')
exit(1)
if (upload_path == None):
print('No path/file given for upload')
exit(1)
list_files_phys = []
for (dirpath, dirnames, filenames) in os.walk(upload_path):
list_files_phys.extend(filenames)
break
result = self._rucio.CreateScope(account=self.rucio_account, scope=scope)
upload_dict = {}
upload_dict['path'] = (upload_path + '/')
upload_dict['rse'] = rse
upload_dict['did_scope'] = scope
result = self._rucio.Upload(upload_dict=[upload_dict])
if ((rse_lifetime != None) and isinstance(rse_lifetime, int) and (rse_lifetime > 0)):
rule = [f'rucio-catalogue:{rse}:{rse_lifetime}']
result_rule_count = []
for i_file in list_files_phys:
upload_structure = f'{scope}:{i_file}'
result_count = self.UpdateRules(upload_structure=upload_structure, rse_rules=rule)
result_rule_count.append(result_count.get(rse, 1))
k_count = 0
for ik in result_rule_count:
k_count += (ik['result'] if isinstance(ik, dict) else ik)
if (k_count == 0):
result_rule = 0
return (result, result_rule)<|docstring|>Function: UploadToScope()
Upload a folder to a Rucio scope
:param scope: A string which follows the Rucio scope naming rules
:param upload_path: A valid path (string) to a folder which holds a file (or files) for upload
:param rse: A valid Rucio Storage Element (RSE)
:param rse_lifetime: A valid (int) which defines the lifetime of the transfer rule after upload.
:return result: (upload_status, rse_rule) means:
* (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule
* (0, 1) for success and no rse_lifetime to the rule
* (1, 1) for upload failure and rse_lifetime is not given
* (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically<|endoftext|> |
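A hypothetical usage sketch for UploadToScope (class name, constructor, scope, path, and RSE values are placeholder assumptions):

from admix.interfaces.rucio_summoner import RucioSummoner

rs = RucioSummoner()
upload_status, rule_status = rs.UploadToScope(scope='user.jdoe',
                                              upload_path='/data/calibration_source_1',
                                              rse='LNGS_USERDISK',
                                              rse_lifetime=86400)  # one day, in seconds
print(upload_status, rule_status)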
459e2627d14b2a1765d310dd22a63ad2572939cc7b09376e93916a908dfa84f7 | def UploadToDid(self, upload_structure=None, upload_path=None, rse=None, rse_lifetime=None):
"Function UploadToDid()\n\n This function uploads the content of given folder into a Rucio dataset\n which is identified by given DID.\n\n For example a folder:\n\n | /path/to/example/calibration_source_1\n | │\n | ├──18_t2_01\n | ├──18_t2_02\n | └──18_t2_03\n\n DID (dataset): calibration_data_day1:calibration_source_1\n\n Results a Rucio structure:\n\n | calibration_data_day1:calibration_source_1 (Rucio dataset)\n | │\n | ├──calibration_data_day1:18_t2_01 (Rucio file attached to dataset)\n | ├──calibration_data_day1:18_t2_02 (Rucio file attached to dataset)\n | └──calibration_data_day1:18_t2_03 (Rucio file attached to dataset)\n\n\n :param upload_structure: A Rucio DID (type str) in form of scope:name\n :param upload_path: A path to an existing folder with data for upload\n :param rse: A valid Rucio Storage Element (RSE) for the initial upload\n :param rse_lifetime: (Optional) A lifetime in seconds\n :return result: (upload_status, rse_rule) means:\n\n * (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule\n\n * (0, 1) for success and no rse_lifetime to the rule\n\n * (1, 1) for upload failure and rse_lifetime is not give\n\n * (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically\n "
result = 1
result_rule = 1
if ((not isinstance(upload_structure, str)) or (':' not in upload_structure)):
print('Function UploadToDid() needs a Rucio (str) DID as input (scope:name)')
return (1, 1)
if (rse == None):
print('No Rucio Storage Element (rse) given.')
return (1, 1)
if (upload_path == None):
print('No path/file given for upload')
return (1, 1)
upload_scope = upload_structure.split(':')[0]
upload_dname = upload_structure.split(':')[1]
result = self._rucio.CreateScope(account=self.rucio_account, scope=upload_scope)
upload_dict = {}
upload_dict['path'] = (upload_path + '/')
upload_dict['rse'] = rse
upload_dict['lifetime'] = rse_lifetime
upload_dict['dataset_scope'] = upload_scope
upload_dict['dataset_name'] = upload_dname
upload_dict['did_scope'] = upload_scope
result = self._rucio.Upload(upload_dict=[upload_dict])
if ((rse_lifetime != None) and isinstance(rse_lifetime, int) and (rse_lifetime > 0) and (result == 0)):
rule = [f'rucio-catalogue:{rse}:{rse_lifetime}']
result_rule = self.UpdateRules(upload_structure=upload_structure, rse_rules=rule)
result_rule = result_rule.get(rse, 1)
return (result, result_rule) | Function UploadToDid()
This function uploads the content of a given folder into a Rucio dataset
which is identified by a given DID.
For example a folder:
| /path/to/example/calibration_source_1
| │
| ├──18_t2_01
| ├──18_t2_02
| └──18_t2_03
DID (dataset): calibration_data_day1:calibration_source_1
This results in a Rucio structure:
| calibration_data_day1:calibration_source_1 (Rucio dataset)
| │
| ├──calibration_data_day1:18_t2_01 (Rucio file attached to dataset)
| ├──calibration_data_day1:18_t2_02 (Rucio file attached to dataset)
| └──calibration_data_day1:18_t2_03 (Rucio file attached to dataset)
:param upload_structure: A Rucio DID (type str) in form of scope:name
:param upload_path: A path to an existing folder with data for upload
:param rse: A valid Rucio Storage Element (RSE) for the initial upload
:param rse_lifetime: (Optional) A lifetime in seconds
:return result: (upload_status, rse_rule) means:
* (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule
* (0, 1) for success and no rse_lifetime to the rule
* (1, 1) for upload failure and rse_lifetime is not given
* (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically | admix/interfaces/rucio_summoner.py | UploadToDid | XENONnT/admix | 2 | python | def UploadToDid(self, upload_structure=None, upload_path=None, rse=None, rse_lifetime=None):
"Function UploadToDid()\n\n This function uploads the content of given folder into a Rucio dataset\n which is identified by given DID.\n\n For example a folder:\n\n | /path/to/example/calibration_source_1\n | │\n | ├──18_t2_01\n | ├──18_t2_02\n | └──18_t2_03\n\n DID (dataset): calibration_data_day1:calibration_source_1\n\n Results a Rucio structure:\n\n | calibration_data_day1:calibration_source_1 (Rucio dataset)\n | │\n | ├──calibration_data_day1:18_t2_01 (Rucio file attached to dataset)\n | ├──calibration_data_day1:18_t2_02 (Rucio file attached to dataset)\n | └──calibration_data_day1:18_t2_03 (Rucio file attached to dataset)\n\n\n :param upload_structure: A Rucio DID (type str) in form of scope:name\n :param upload_path: A path to an existing folder with data for upload\n :param rse: A valid Rucio Storage Element (RSE) for the initial upload\n :param rse_lifetime: (Optional) A lifetime in seconds\n :return result: (upload_status, rse_rule) means:\n\n * (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule\n\n * (0, 1) for success and no rse_lifetime to the rule\n\n * (1, 1) for upload failure and rse_lifetime is not give\n\n * (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically\n "
result = 1
result_rule = 1
if ((isinstance(upload_structure, str) == False) and (':' not in upload_structure)):
print('Function UploadDid() needs an Rucio (str) DID as input (scope:name)')
return (1, 1)
if (rse == None):
print('No Rucio Storage Element (rse) given.')
return (1, 1)
if (upload_path == None):
print('No path/file given for upload')
return (1, 1)
upload_scope = upload_structure.split(':')[0]
upload_dname = upload_structure.split(':')[1]
result = self._rucio.CreateScope(account=self.rucio_account, scope=upload_scope)
upload_dict = {}
upload_dict['path'] = (upload_path + '/')
upload_dict['rse'] = rse
upload_dict['lifetime'] = rse_lifetime
upload_dict['dataset_scope'] = upload_scope
upload_dict['dataset_name'] = upload_dname
upload_dict['did_scope'] = upload_scope
result = self._rucio.Upload(upload_dict=[upload_dict])
if ((rse_lifetime != None) and isinstance(rse_lifetime, int) and (rse_lifetime > 0) and (result == 0)):
rule = [f'rucio-catalogue:{rse}:{rse_lifetime}']
result_rule = self.UpdateRules(upload_structure=upload_structure, rse_rules=rule)
result_rule = result_rule.get(rse, 1)
return (result, result_rule) | def UploadToDid(self, upload_structure=None, upload_path=None, rse=None, rse_lifetime=None):
"Function UploadToDid()\n\n This function uploads the content of given folder into a Rucio dataset\n which is identified by given DID.\n\n For example a folder:\n\n | /path/to/example/calibration_source_1\n | │\n | ├──18_t2_01\n | ├──18_t2_02\n | └──18_t2_03\n\n DID (dataset): calibration_data_day1:calibration_source_1\n\n Results a Rucio structure:\n\n | calibration_data_day1:calibration_source_1 (Rucio dataset)\n | │\n | ├──calibration_data_day1:18_t2_01 (Rucio file attached to dataset)\n | ├──calibration_data_day1:18_t2_02 (Rucio file attached to dataset)\n | └──calibration_data_day1:18_t2_03 (Rucio file attached to dataset)\n\n\n :param upload_structure: A Rucio DID (type str) in form of scope:name\n :param upload_path: A path to an existing folder with data for upload\n :param rse: A valid Rucio Storage Element (RSE) for the initial upload\n :param rse_lifetime: (Optional) A lifetime in seconds\n :return result: (upload_status, rse_rule) means:\n\n * (0, {'result': 0, 'lifetime': rse_lifetime}) for success and applied lifetime to the rule\n\n * (0, 1) for success and no rse_lifetime to the rule\n\n * (1, 1) for upload failure and rse_lifetime is not give\n\n * (1, {'result':1, 'lifetime': rse_lifetime}) for upload failure and rse_lifetime is skipped automatically\n "
result = 1
result_rule = 1
if ((isinstance(upload_structure, str) == False) and (':' not in upload_structure)):
print('Function UploadDid() needs an Rucio (str) DID as input (scope:name)')
return (1, 1)
if (rse == None):
print('No Rucio Storage Element (rse) given.')
return (1, 1)
if (upload_path == None):
print('No path/file given for upload')
return (1, 1)
upload_scope = upload_structure.split(':')[0]
upload_dname = upload_structure.split(':')[1]
result = self._rucio.CreateScope(account=self.rucio_account, scope=upload_scope)
upload_dict = {}
upload_dict['path'] = (upload_path + '/')
upload_dict['rse'] = rse
upload_dict['lifetime'] = rse_lifetime
upload_dict['dataset_scope'] = upload_scope
upload_dict['dataset_name'] = upload_dname
upload_dict['did_scope'] = upload_scope
result = self._rucio.Upload(upload_dict=[upload_dict])
if ((rse_lifetime != None) and isinstance(rse_lifetime, int) and (rse_lifetime > 0) and (result == 0)):
rule = [f'rucio-catalogue:{rse}:{rse_lifetime}']
result_rule = self.UpdateRules(upload_structure=upload_structure, rse_rules=rule)
result_rule = result_rule.get(rse, 1)
return (result, result_rule)<|docstring|>Function UploadToDid()
This function uploads the content of a given folder into a Rucio dataset
which is identified by the given DID.
For example, a folder:
| /path/to/example/calibration_source_1
| │
| ├──18_t2_01
| ├──18_t2_02
| └──18_t2_03
DID (dataset): calibration_data_day1:calibration_source_1
Results in a Rucio structure:
| calibration_data_day1:calibration_source_1 (Rucio dataset)
| │
| ├──calibration_data_day1:18_t2_01 (Rucio file attached to dataset)
| ├──calibration_data_day1:18_t2_02 (Rucio file attached to dataset)
| └──calibration_data_day1:18_t2_03 (Rucio file attached to dataset)
:param upload_structure: A Rucio DID (type str) in the form of scope:name
:param upload_path: A path to an existing folder with data for upload
:param rse: A valid Rucio Storage Element (RSE) for the initial upload
:param rse_lifetime: (Optional) A lifetime in seconds
:return result: (upload_status, rse_rule) means:
* (0, {'result': 0, 'lifetime': rse_lifetime}) for success with the lifetime applied to the rule
* (0, 1) for success with no rse_lifetime applied to the rule
* (1, 1) for upload failure when rse_lifetime is not given
* (1, {'result': 1, 'lifetime': rse_lifetime}) for upload failure; rse_lifetime is skipped automatically<|endoftext|>
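A minimal usage sketch for the UploadToDid() call documented above. The summoner construction, DID, folder path, RSE name, and lifetime are illustrative assumptions, not values taken from the admix repository.

# Hypothetical usage; 'rs' stands in for a configured RucioSummoner-like object.
rs = RucioSummoner()  # construction/configuration details assumed
status, rule_result = rs.UploadToDid(
    upload_structure='calibration_data_day1:calibration_source_1',  # scope:name
    upload_path='/path/to/example/calibration_source_1',
    rse='SOME_RSE',          # placeholder; must be a valid Rucio Storage Element
    rse_lifetime=86400)      # optional lifetime in seconds
if status == 0:
    print('upload succeeded; rule result:', rule_result)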
70e5c6f2ce72c71b9012d74024b12705d9c6d184dc4815c60cd9e75fb515d484 | def Upload(self, did, upload_path, rse, lifetime=None):
'Function: Upload(...)\n The data files of the upload_path are always uploaded to the last Rucio dataset.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param upload_path: The absolute path of your dataset\n :param rse: A valid Rucio Storage Element (RSE) for the upload\n :param rse_lifetime: The lifetime of the dataset (lowest level of template dictionary) after the upload\n Hint: dataset lifetimes below 24h (86400 sec) are automatically set to 86400 sec.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: 0 for success, 1 for failure\n '
(scope, dataset) = did.split(':')
logger.debug(f"Creating scope {scope}, if it doesn't exist")
self._rucio.CreateScope(account=self.rucio_account, scope=scope)
if ((lifetime != None) and (int(lifetime) < 86400)):
lifetime = 86400
upload_dict = dict(path=upload_path, rse=rse, lifetime=lifetime, did_scope=scope, dataset_scope=scope, dataset_name=dataset)
result = self._rucio.Upload(upload_dict=[upload_dict])
return result | Function: Upload(...)
The data files in upload_path are uploaded to the Rucio dataset identified by the given DID.
:param did: A Rucio DID (type str) in the form of scope:name
:param upload_path: The absolute path of your dataset
:param rse: A valid Rucio Storage Element (RSE) for the upload
:param lifetime: (Optional) The lifetime of the dataset in seconds after the upload.
Hint: dataset lifetimes below 24h (86400 sec) are automatically set to 86400 sec.
:return result: 0 for success, 1 for failure | admix/interfaces/rucio_summoner.py | Upload | XENONnT/admix | 2 | python | def Upload(self, did, upload_path, rse, lifetime=None):
'Function: Upload(...)\n The data files of the upload_path are always uploaded to the last Rucio dataset.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param upload_path: The absolute path of your dataset\n :param rse: A valid Rucio Storage Element (RSE) for the upload\n :param rse_lifetime: The lifetime of the dataset (lowest level of template dictionary) after the upload\n Hint: dataset lifetimes below 24h (86400 sec) are automatically set to 86400 sec.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: 0 for success, 1 for failure\n '
(scope, dataset) = did.split(':')
logger.debug(f"Creating scope {scope}, if it doesn't exist")
self._rucio.CreateScope(account=self.rucio_account, scope=scope)
if ((lifetime != None) and (int(lifetime) < 86400)):
lifetime = 86400
upload_dict = dict(path=upload_path, rse=rse, lifetime=lifetime, did_scope=scope, dataset_scope=scope, dataset_name=dataset)
result = self._rucio.Upload(upload_dict=[upload_dict])
return result | def Upload(self, did, upload_path, rse, lifetime=None):
'Function: Upload(...)\n The data files of the upload_path are always uploaded to the last Rucio dataset.\n\n :param upload_structure: A string (Rucio DID form of "scope:name") or a template dictionary\n :param upload_path: The absolute path of your dataset\n :param rse: A valid Rucio Storage Element (RSE) for the upload\n :param rse_lifetime: The lifetime of the dataset (lowest level of template dictionary) after the upload\n Hint: dataset lifetimes below 24h (86400 sec) are automatically set to 86400 sec.\n :param level: If a template dictionary is used, the level refers to the depth of the sorted dictionary at\n which the \'did\' is chosen from.\n :return result: 0 for success, 1 for failure\n '
(scope, dataset) = did.split(':')
logger.debug(f"Creating scope {scope}, if it doesn't exist")
self._rucio.CreateScope(account=self.rucio_account, scope=scope)
if ((lifetime != None) and (int(lifetime) < 86400)):
lifetime = 86400
upload_dict = dict(path=upload_path, rse=rse, lifetime=lifetime, did_scope=scope, dataset_scope=scope, dataset_name=dataset)
result = self._rucio.Upload(upload_dict=[upload_dict])
return result<|docstring|>Function: Upload(...)
The data files in upload_path are uploaded to the Rucio dataset identified by the given DID.
:param did: A Rucio DID (type str) in the form of scope:name
:param upload_path: The absolute path of your dataset
:param rse: A valid Rucio Storage Element (RSE) for the upload
:param lifetime: (Optional) The lifetime of the dataset in seconds after the upload.
Hint: dataset lifetimes below 24h (86400 sec) are automatically set to 86400 sec.
:return result: 0 for success, 1 for failure<|endoftext|>
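A comparable sketch for the simpler Upload() wrapper. All argument values are placeholders; the lifetime of 3600 s merely illustrates the documented 86400 s floor.

# Hypothetical call; 'rs' is an already-configured summoner instance.
status = rs.Upload(
    did='calibration_data_day1:calibration_source_1',   # scope:name, placeholder
    upload_path='/path/to/example/calibration_source_1',
    rse='SOME_RSE',    # placeholder RSE name
    lifetime=3600)     # below 24h, so it is raised to 86400 internally
print('0 for success, 1 for failure:', status)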
2f5b7c977eb6c84bc1bba8c56526d7ebabb33da19b811c719920293c0f0cb390 | def palindroma(palabra):
'\n Función para detectar palabras palíndromas.\n \n Args: palabra.\n \n Salida: Confirmación si la palabra introducida es palíndroma (o no).\n '
palabra = palabra.lower()
palabra_inv = palabra[::(- 1)]
if (palabra_inv == palabra):
return print(f'La palabra introducida "{palabra}" es palíndroma')
else:
return print(f'La palabra introducida "{palabra}" NO es palíndroma') | Function to detect palindromic words.
Args: palabra.
Output: Confirmation of whether the entered word is a palindrome (or not). | ejercicios_basicos/funciones/ejerc7_T10.py | palindroma | JuanDuran85/ejemplos_python | 0 | python | def palindroma(palabra):
'\n Función para detectar palabras palíndromas.\n \n Args: palabra.\n \n Salida: Confirmación si la palabra introducida es palíndroma (o no).\n '
palabra = palabra.lower()
palabra_inv = palabra[::(- 1)]
if (palabra_inv == palabra):
return print(f'La palabra introducida "{palabra}" es palíndroma')
else:
return print(f'La palabra introducida "{palabra}" NO es palíndroma') | def palindroma(palabra):
'\n Función para detectar palabras palíndromas.\n \n Args: palabra.\n \n Salida: Confirmación si la palabra introducida es palíndroma (o no).\n '
palabra = palabra.lower()
palabra_inv = palabra[::(- 1)]
if (palabra_inv == palabra):
return print(f'La palabra introducida "{palabra}" es palíndroma')
else:
return print(f'La palabra introducida "{palabra}" NO es palíndroma')<|docstring|>Function to detect palindromic words.
Args: palabra.
Output: Confirmation of whether the entered word is a palindrome (or not).<|endoftext|>
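Two illustrative calls to palindroma(). Note that the function prints its verdict and implicitly returns None (the return value of print), rather than returning a boolean.

palindroma('Oso')     # lowercased 'oso' equals its reverse -> reported as a palindrome
palindroma('Python')  # 'python' differs from 'nohtyp' -> reported as not a palindrome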
60333911a530c6d0de3d9c6f3fd4f03d0a21d8c3aa7fd465a1f2ea70405cc5e6 | @validate({'client': 's3', 'bucket': 'str'})
def get(client, bucket):
'\n Get the BIG-IP admin password from the S3 bucket created by the CFT\n '
data = client.get_object(Bucket=bucket, Key='credentials/master')
json_data = data['Body'].read()
return json.loads(json_data)['password'] | Get the BIG-IP admin password from the S3 bucket created by the CFT | lambda/f5_sca_libs/src/f5_sca_libs/password.py | get | vinnie357/f5-sca-securitystack | 11 | python | @validate({'client': 's3', 'bucket': 'str'})
def get(client, bucket):
'\n \n '
data = client.get_object(Bucket=bucket, Key='credentials/master')
json_data = data['Body'].read()
return json.loads(json_data)['password'] | @validate({'client': 's3', 'bucket': 'str'})
def get(client, bucket):
'\n \n '
data = client.get_object(Bucket=bucket, Key='credentials/master')
json_data = data['Body'].read()
return json.loads(json_data)['password']<|docstring|>Get the BIG-IP admin password from the S3 bucket created by the CFT<|endoftext|> |
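A short sketch of calling get() with boto3; the bucket name is a placeholder. The function reads s3://<bucket>/credentials/master and returns the 'password' field of that JSON object.

import boto3

client = boto3.client('s3')              # the 's3' client the @validate decorator expects
password = get(client, 'my-cft-bucket')  # hypothetical bucket created by the CFT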
154a8d616bf53be4b4350b72ae87483090afdaa4c9e0f3868622bc82b7647a1a | def snap_del(snap_file, session_name):
'\n Delete an existing attribute in a snap file.\n\n Args:\n --------\n snap_file: \n a snap format file.\n\n session_name: \n attribute to delete ["AM", "GM", "PM", "FM"].\n \n '
if (not os.path.exists(snap_file)):
print((('error: ' + snap_file) + ' does not exist!'))
sys.exit(1)
file_format = snaptools.utilities.checkFileFormat(snap_file)
if (file_format != 'snap'):
print(('error: input file %s is not a snap file!' % snap_file))
sys.exit(1)
fin = h5py.File(snap_file, 'r', libver='earliest')
if (session_name not in list(fin.keys())):
print(('error: --session-name %s does not exist in %s' % (session_name, snap_file)))
sys.exit(1)
fout_name = tempfile.NamedTemporaryFile(delete=False, dir=None)
fout = h5py.File(fout_name.name, 'a', libver='earliest')
session_name_list = list(fin.keys())
session_name_list.remove(session_name)
for group_name in session_name_list:
fout.copy(fin[group_name], group_name, shallow=False)
fin.close()
fout.close()
subprocess.check_call('\t'.join(['mv', fout_name.name, snap_file]), shell=True) | Delete an existing attribute in a snap file.
Args:
--------
snap_file:
a snap format file.
session_name:
attribute to delete ["AM", "GM", "PM", "FM"]. | snaptools/snap_del.py | snap_del | hisplan/SnapTools | 26 | python | def snap_del(snap_file, session_name):
'\n Delete an existing attribute in a snap file.\n\n Args:\n --------\n snap_file: \n a snap format file.\n\n session_name: \n attribute to delete ["AM", "GM", "PM", "FM"].\n \n '
if (not os.path.exists(snap_file)):
print((('error: ' + snap_file) + ' does not exist!'))
sys.exit(1)
file_format = snaptools.utilities.checkFileFormat(snap_file)
if (file_format != 'snap'):
print(('error: input file %s is not a snap file!' % snap_file))
sys.exit(1)
fin = h5py.File(snap_file, 'r', libver='earliest')
if (session_name not in list(fin.keys())):
print(('error: --session-name %s does not exist in %s' % (session_name, snap_file)))
sys.exit(1)
fout_name = tempfile.NamedTemporaryFile(delete=False, dir=None)
fout = h5py.File(fout_name.name, 'a', libver='earliest')
session_name_list = list(fin.keys())
session_name_list.remove(session_name)
for group_name in session_name_list:
fout.copy(fin[group_name], group_name, shallow=False)
fin.close()
fout.close()
subprocess.check_call('\t'.join(['mv', fout_name.name, snap_file]), shell=True) | def snap_del(snap_file, session_name):
'\n Delete an existing attribute in a snap file.\n\n Args:\n --------\n snap_file: \n a snap format file.\n\n session_name: \n attribute to delete ["AM", "GM", "PM", "FM"].\n \n '
if (not os.path.exists(snap_file)):
print((('error: ' + snap_file) + ' does not exist!'))
sys.exit(1)
file_format = snaptools.utilities.checkFileFormat(snap_file)
if (file_format != 'snap'):
print(('error: input file %s is not a snap file!' % snap_file))
sys.exit(1)
fin = h5py.File(snap_file, 'r', libver='earliest')
if (session_name not in list(fin.keys())):
print(('error: --session-name %s does not exist in %s' % (session_name, snap_file)))
sys.exit(1)
fout_name = tempfile.NamedTemporaryFile(delete=False, dir=None)
fout = h5py.File(fout_name.name, 'a', libver='earliest')
session_name_list = list(fin.keys())
session_name_list.remove(session_name)
for group_name in session_name_list:
fout.copy(fin[group_name], group_name, shallow=False)
fin.close()
fout.close()
subprocess.check_call('\t'.join(['mv', fout_name.name, snap_file]), shell=True)<|docstring|>Delete an existing attribute in a snap file.
Args:
--------
snap_file:
a snap format file.
session_name:
attribute to delete ["AM", "GM", "PM", "FM"].<|endoftext|> |
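A minimal sketch of snap_del(); the file name and session are assumptions. Internally the function copies every other top-level group into a temporary HDF5 file and then moves it over the original, so the named session is removed in place.

snap_del(snap_file='sample.snap',   # placeholder path to an existing snap file
         session_name='PM')         # one of the documented sessions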
547a04af09a9782f7ed9ef15f1316b3bbd18a5d3299df34d07a16b1f8ae24287 | def _check_for_flux_ratio_errors(self):
'\n If combination of settings and models are invalid, raise exceptions.\n '
if (self.fix_source_flux_ratio is not False):
if (self._model.n_sources != 2):
msg = ('fix_source_flux_ratio only valid for models with 2 ' + 'sources. n_sources = {0}'.format(self._model.n_sources))
raise ValueError(msg)
elif (self.fix_source_flux is not False):
msg = (('fix_source_flux_ratio + fixed_source_flux not ' + 'implemented. Fix the fluxes for each source ') + 'individually instead.')
raise NotImplementedError(msg) | If the combination of settings and models is invalid, raise exceptions. | source/MulensModel/fitdata.py | _check_for_flux_ratio_errors | rpoleski/MulensModel | 30 | python | def _check_for_flux_ratio_errors(self):
'\n \n '
if (self.fix_source_flux_ratio is not False):
if (self._model.n_sources != 2):
msg = ('fix_source_flux_ratio only valid for models with 2 ' + 'sources. n_sources = {0}'.format(self._model.n_sources))
raise ValueError(msg)
elif (self.fix_source_flux is not False):
msg = (('fix_source_flux_ratio + fixed_source_flux not ' + 'implemented. Fix the fluxes for each source ') + 'individually instead.')
raise NotImplementedError(msg) | def _check_for_flux_ratio_errors(self):
'\n \n '
if (self.fix_source_flux_ratio is not False):
if (self._model.n_sources != 2):
msg = ('fix_source_flux_ratio only valid for models with 2 ' + 'sources. n_sources = {0}'.format(self._model.n_sources))
raise ValueError(msg)
elif (self.fix_source_flux is not False):
msg = (('fix_source_flux_ratio + fixed_source_flux not ' + 'implemented. Fix the fluxes for each source ') + 'individually instead.')
raise NotImplementedError(msg)<|docstring|>If the combination of settings and models is invalid, raise exceptions.<|endoftext|>
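An assumed illustration of the second branch above: fixing the flux ratio together with an individual source flux is the combination that triggers NotImplementedError. The FitData constructor keywords are inferred from the attributes used here, not confirmed against the full source.

fit = FitData(model=binary_source_model, dataset=my_data,  # placeholder objects
              fix_source_flux_ratio=0.1,
              fix_source_flux=[1.0, False])
# when _check_for_flux_ratio_errors() runs, it raises NotImplementedError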
216882eb9fd9a55b40cc8c01fccab28af02ea8cb1becda681f0ff74dbc55ae92 | def update(self, bad=False):
'\n Calculate the best-fit source and blend fluxes as well as the chi2.\n\n Keywords :\n bad: *bool*\n Default is *False*. If *True* recalculates the data\n magnification for each point to ensure that there are values\n even for bad datapoints.\n\n '
self.fit_fluxes()
model_flux = self.get_model_fluxes(bad=bad)
diff = (self._dataset.flux - model_flux)
self._chi2_per_point = ((diff / self._dataset.err_flux) ** 2) | Calculate the best-fit source and blend fluxes as well as the chi2.
Keywords :
bad: *bool*
Default is *False*. If *True*, recalculates the data
magnification for each point to ensure that there are values
even for bad datapoints. | source/MulensModel/fitdata.py | update | rpoleski/MulensModel | 30 | python | def update(self, bad=False):
'\n Calculate the best-fit source and blend fluxes as well as the chi2.\n\n Keywords :\n bad: *bool*\n Default is *False*. If *True* recalculates the data\n magnification for each point to ensure that there are values\n even for bad datapoints.\n\n '
self.fit_fluxes()
model_flux = self.get_model_fluxes(bad=bad)
diff = (self._dataset.flux - model_flux)
self._chi2_per_point = ((diff / self._dataset.err_flux) ** 2) | def update(self, bad=False):
'\n Calculate the best-fit source and blend fluxes as well as the chi2.\n\n Keywords :\n bad: *bool*\n Default is *False*. If *True* recalculates the data\n magnification for each point to ensure that there are values\n even for bad datapoints.\n\n '
self.fit_fluxes()
model_flux = self.get_model_fluxes(bad=bad)
diff = (self._dataset.flux - model_flux)
self._chi2_per_point = ((diff / self._dataset.err_flux) ** 2)<|docstring|>Calculate the best-fit source and blend fluxes as well as the chi2.
Keywords :
bad: *bool*
Default is *False*. If *True*, recalculates the data
magnification for each point to ensure that there are values
even for bad datapoints.<|endoftext|> |
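A sketch of the documented workflow, with object names assumed: update() first fits the fluxes and then fills the per-point chi2, which can be summed over the good epochs.

import numpy as np

fit = FitData(model=my_model, dataset=my_data)   # placeholder model/dataset
fit.update()                                     # fit fluxes, then chi2 per point
chi2 = np.sum(fit.chi2_per_point[my_data.good])  # assumes a chi2_per_point property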
1bdf8233395879dfb4eba5383231370c0e6b7b521216a49019da6b9ad75eee0c | def _calculate_magnifications(self, bad=True):
'\n Calculate the model magnifications for the epochs of the dataset.\n '
if bad:
select = np.ones(self._dataset.n_epochs, dtype=bool)
else:
select = self._dataset.good
if (self.dataset.ephemerides_file is None):
satellite_skycoord = None
else:
satellite_skycoord = self.dataset.satellite_skycoord
magnification_kwargs = {'gamma': self.gamma, 'satellite_skycoord': satellite_skycoord}
if (self._model.n_sources == 1):
mag_matrix = self._model.get_magnification(time=self._dataset.time[select], **magnification_kwargs)
elif (self._model.n_sources == 2):
mag_matrix = self._model.get_magnification(time=self._dataset.time[select], separate=True, **magnification_kwargs)
else:
msg = (('{0} '.format(self._model.n_sources) + 'sources used. Function model.get_magnification can ') + 'only handle <=2 sources')
raise NotImplementedError(msg)
if bad:
self._data_magnification = mag_matrix
elif (self._model.n_sources == 1):
self._data_magnification = np.zeros(self._dataset.n_epochs)
self._data_magnification[self._dataset.good] = mag_matrix
else:
self._data_magnification = [np.zeros(self._dataset.n_epochs)]
self._data_magnification[0][self._dataset.good] = mag_matrix[0]
for source in range(1, self.model.n_sources):
self._data_magnification.append(np.zeros(self._dataset.n_epochs))
self._data_magnification[source][self._dataset.good] = mag_matrix[source] | Calculate the model magnifications for the epochs of the dataset. | source/MulensModel/fitdata.py | _calculate_magnifications | rpoleski/MulensModel | 30 | python | def _calculate_magnifications(self, bad=True):
'\n \n '
if bad:
select = np.ones(self._dataset.n_epochs, dtype=bool)
else:
select = self._dataset.good
if (self.dataset.ephemerides_file is None):
satellite_skycoord = None
else:
satellite_skycoord = self.dataset.satellite_skycoord
magnification_kwargs = {'gamma': self.gamma, 'satellite_skycoord': satellite_skycoord}
if (self._model.n_sources == 1):
mag_matrix = self._model.get_magnification(time=self._dataset.time[select], **magnification_kwargs)
elif (self._model.n_sources == 2):
mag_matrix = self._model.get_magnification(time=self._dataset.time[select], separate=True, **magnification_kwargs)
else:
msg = (('{0} '.format(self._model.n_sources) + 'sources used. Function model.get_magnification can ') + 'only handle <=2 sources')
raise NotImplementedError(msg)
if bad:
self._data_magnification = mag_matrix
elif (self._model.n_sources == 1):
self._data_magnification = np.zeros(self._dataset.n_epochs)
self._data_magnification[self._dataset.good] = mag_matrix
else:
self._data_magnification = [np.zeros(self._dataset.n_epochs)]
self._data_magnification[0][self._dataset.good] = mag_matrix[0]
for source in range(1, self.model.n_sources):
self._data_magnification.append(np.zeros(self._dataset.n_epochs))
self._data_magnification[source][self._dataset.good] = mag_matrix[source] | def _calculate_magnifications(self, bad=True):
'\n \n '
if bad:
select = np.ones(self._dataset.n_epochs, dtype=bool)
else:
select = self._dataset.good
if (self.dataset.ephemerides_file is None):
satellite_skycoord = None
else:
satellite_skycoord = self.dataset.satellite_skycoord
magnification_kwargs = {'gamma': self.gamma, 'satellite_skycoord': satellite_skycoord}
if (self._model.n_sources == 1):
mag_matrix = self._model.get_magnification(time=self._dataset.time[select], **magnification_kwargs)
elif (self._model.n_sources == 2):
mag_matrix = self._model.get_magnification(time=self._dataset.time[select], separate=True, **magnification_kwargs)
else:
msg = (('{0} '.format(self._model.n_sources) + 'sources used. Function model.get_magnification can ') + 'only handle <=2 sources')
raise NotImplementedError(msg)
if bad:
self._data_magnification = mag_matrix
elif (self._model.n_sources == 1):
self._data_magnification = np.zeros(self._dataset.n_epochs)
self._data_magnification[self._dataset.good] = mag_matrix
else:
self._data_magnification = [np.zeros(self._dataset.n_epochs)]
self._data_magnification[0][self._dataset.good] = mag_matrix[0]
for source in range(1, self.model.n_sources):
self._data_magnification.append(np.zeros(self._dataset.n_epochs))
self._data_magnification[source][self._dataset.good] = mag_matrix[source]<|docstring|>Calculate the model magnifications for the epochs of the dataset.<|endoftext|> |
bc42e8b79f4e6dfb445d1b451328f98e3b0e41d6a27056d20f73a9acfec4d85f | def _get_xy_qflux(self):
' Apply a fixed flux ratio. '
y = self._dataset.flux[self._dataset.good]
x = np.array((self._data_magnification[0][self._dataset.good] + (self.fix_source_flux_ratio * self._data_magnification[1][self._dataset.good])))
self.n_fluxes = 1
return (x, y) | Apply a fixed flux ratio. | source/MulensModel/fitdata.py | _get_xy_qflux | rpoleski/MulensModel | 30 | python | def _get_xy_qflux(self):
' '
y = self._dataset.flux[self._dataset.good]
x = np.array((self._data_magnification[0][self._dataset.good] + (self.fix_source_flux_ratio * self._data_magnification[1][self._dataset.good])))
self.n_fluxes = 1
return (x, y) | def _get_xy_qflux(self):
' '
y = self._dataset.flux[self._dataset.good]
x = np.array((self._data_magnification[0][self._dataset.good] + (self.fix_source_flux_ratio * self._data_magnification[1][self._dataset.good])))
self.n_fluxes = 1
return (x, y)<|docstring|>Apply a fixed flux ratio.<|endoftext|> |
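A brief note on why n_fluxes becomes 1 here: with a fixed ratio q = f_s2/f_s1, the two-source flux model F(t) = f_s1*A_1(t) + f_s2*A_2(t) + f_blend collapses to F(t) = f_s1*(A_1(t) + q*A_2(t)) + f_blend, leaving a single source-flux unknown. Toy numbers (assumed):

q, A1, A2 = 0.1, 2.0, 1.5
A_eff = A1 + q * A2   # 2.15: the single effective-magnification column for the solver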
d4057665b889b58cee7e7983c152420ed96f856f87550519d5f0237ab815562c | def _get_xy_individual_fluxes(self):
' Account for source fluxes individually '
y = self._dataset.flux[self._dataset.good]
if (self.fix_source_flux is False):
x = np.array(self._data_magnification)
if (self.model.n_sources == 1):
x = x[self._dataset.good]
else:
x = x[(:, self._dataset.good)]
self.n_fluxes = self._model.n_sources
else:
x = None
if (self._model.n_sources == 1):
y -= (self.fix_source_flux[0] * self._data_magnification[self._dataset.good])
else:
for i in range(self._model.n_sources):
if (self.fix_source_flux[i] is False):
self.n_fluxes += 1
if (x is None):
x = self._data_magnification[i][self._dataset.good]
else:
x = np.vstack((x, self._data_magnification[i][self._dataset.good]))
else:
y -= (self.fix_source_flux[i] * self._data_magnification[i][self._dataset.good])
return (x, y) | Account for source fluxes individually | source/MulensModel/fitdata.py | _get_xy_individual_fluxes | rpoleski/MulensModel | 30 | python | def _get_xy_individual_fluxes(self):
' '
y = self._dataset.flux[self._dataset.good]
if (self.fix_source_flux is False):
x = np.array(self._data_magnification)
if (self.model.n_sources == 1):
x = x[self._dataset.good]
else:
x = x[(:, self._dataset.good)]
self.n_fluxes = self._model.n_sources
else:
x = None
if (self._model.n_sources == 1):
y -= (self.fix_source_flux[0] * self._data_magnification[self._dataset.good])
else:
for i in range(self._model.n_sources):
if (self.fix_source_flux[i] is False):
self.n_fluxes += 1
if (x is None):
x = self._data_magnification[i][self._dataset.good]
else:
x = np.vstack((x, self._data_magnification[i][self._dataset.good]))
else:
y -= (self.fix_source_flux[i] * self._data_magnification[i][self._dataset.good])
return (x, y) | def _get_xy_individual_fluxes(self):
' '
y = self._dataset.flux[self._dataset.good]
if (self.fix_source_flux is False):
x = np.array(self._data_magnification)
if (self.model.n_sources == 1):
x = x[self._dataset.good]
else:
x = x[(:, self._dataset.good)]
self.n_fluxes = self._model.n_sources
else:
x = None
if (self._model.n_sources == 1):
y -= (self.fix_source_flux[0] * self._data_magnification[self._dataset.good])
else:
for i in range(self._model.n_sources):
if (self.fix_source_flux[i] is False):
self.n_fluxes += 1
if (x is None):
x = self._data_magnification[i][self._dataset.good]
else:
x = np.vstack((x, self._data_magnification[i][self._dataset.good]))
else:
y -= (self.fix_source_flux[i] * self._data_magnification[i][self._dataset.good])
return (x, y)<|docstring|>Account for source fluxes individually<|endoftext|> |
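Here the fixed-flux handling moves known contributions to the left-hand side: for each source i with a fixed flux f_i, the data vector becomes y - f_i*A_i(t), and only the free fluxes remain as columns of x. A toy version with assumed numbers:

import numpy as np

flux = np.array([5.0, 7.0])   # observed fluxes at two good epochs
A1 = np.array([2.0, 3.0])     # magnification of the source whose flux is fixed
y = flux - 1.5 * A1           # as if fix_source_flux[0] = 1.5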
2acd09c48566dfe638d2af2c3a90a5d49638e6732d29a1954434de9b1aba2b67 | def _setup_linalg_arrays(self):
'\n Create xT and y arrays\n '
(x, y) = self._create_arrays()
xT = self._invert_x_array(x)
(xT, y) = self._weight_linalg_arrays(xT, y)
return (xT, y) | Create xT and y arrays | source/MulensModel/fitdata.py | _setup_linalg_arrays | rpoleski/MulensModel | 30 | python | def _setup_linalg_arrays(self):
'\n \n '
(x, y) = self._create_arrays()
xT = self._invert_x_array(x)
(xT, y) = self._weight_linalg_arrays(xT, y)
return (xT, y) | def _setup_linalg_arrays(self):
'\n \n '
(x, y) = self._create_arrays()
xT = self._invert_x_array(x)
(xT, y) = self._weight_linalg_arrays(xT, y)
return (xT, y)<|docstring|>Create xT and y arrays<|endoftext|> |
88d9946ebfdeb0ff31093cc4658747114674207de0f3e981d28f61a253ca20d8 | def _invert_x_array(self, x):
' Take the transpose of x '
n_epochs = np.sum(self._dataset.good)
xT = np.copy(x).T
xT.shape = (n_epochs, self.n_fluxes)
return xT | Take the transpose of x | source/MulensModel/fitdata.py | _invert_x_array | rpoleski/MulensModel | 30 | python | def _invert_x_array(self, x):
' '
n_epochs = np.sum(self._dataset.good)
xT = np.copy(x).T
xT.shape = (n_epochs, self.n_fluxes)
return xT | def _invert_x_array(self, x):
' '
n_epochs = np.sum(self._dataset.good)
xT = np.copy(x).T
xT.shape = (n_epochs, self.n_fluxes)
return xT<|docstring|>Take the transpose of x<|endoftext|> |
a2589f4e547cb0b098e52f464815509fb2f9876ddd37a907c71fcd3fea1ac5a4 | def _weight_linalg_arrays(self, xT, y):
'weight by data uncertainties'
sigma_inverse = (1.0 / self._dataset.err_flux[self._dataset.good])
y *= sigma_inverse
xT *= np.array(([sigma_inverse] * self.n_fluxes)).T
return (xT, y) | weight by data uncertainties | source/MulensModel/fitdata.py | _weight_linalg_arrays | rpoleski/MulensModel | 30 | python | def _weight_linalg_arrays(self, xT, y):
sigma_inverse = (1.0 / self._dataset.err_flux[self._dataset.good])
y *= sigma_inverse
xT *= np.array(([sigma_inverse] * self.n_fluxes)).T
return (xT, y) | def _weight_linalg_arrays(self, xT, y):
sigma_inverse = (1.0 / self._dataset.err_flux[self._dataset.good])
y *= sigma_inverse
xT *= np.array(([sigma_inverse] * self.n_fluxes)).T
return (xT, y)<|docstring|>weight by data uncertainties<|endoftext|> |
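Dividing both the design matrix and the data vector by the per-point uncertainties turns a plain least-squares solve into a chi2 minimization. A toy check with assumed values:

import numpy as np

X = np.array([[2.0], [3.0]])   # one flux column, two epochs
y = np.array([4.1, 5.9])       # observed fluxes
sigma = np.array([0.1, 0.2])   # per-point flux uncertainties
f = np.linalg.lstsq(X / sigma[:, None], y / sigma, rcond=-1)[0]
# f minimizes sum(((y - X @ f) / sigma) ** 2), the chi2 used by this class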
14162515b5a27fbf15f3aabf24bd57d4722b1fd7fae9b723d321776201724d25 | def fit_fluxes(self):
'\n Execute the linear least squares fit to determine the fitted fluxes.\n Sets the values of :py:obj:`~source_fluxes`, :py:obj:`~blend_flux`,\n and (if applicable) :py:obj:`~source_flux`.\n\n Does *not* calculate chi2. To fit for the fluxes and calculate chi2,\n run :py:func:`~update()`.\n '
(xT, y) = self._setup_linalg_arrays()
try:
results = np.linalg.lstsq(xT, y, rcond=(- 1))[0]
except ValueError as e:
message = "{0}\nIf either of these numbers ({1}, {2}) is greater than zero, there is a NaN somewhere, probably in the data. The cause of this error may be the epochs with extreme brightness (e.g., 99.999 mag), which is sometimes used to mark bad data. Other possible reason is mistakenly using phot_fmt='flux' instead of 'mag'"
args = (e, np.sum(np.isnan(xT)), np.sum(np.isnan(y)))
raise ValueError(message.format(*args))
if (self.fix_source_flux_ratio is False):
if (self.fix_source_flux is False):
self._source_fluxes = results[0:self._model.n_sources]
else:
self._source_fluxes = []
index = 0
for i in range(self._model.n_sources):
if (self.fix_source_flux[i] is False):
self._source_fluxes.append(results[index])
index += 1
else:
self._source_fluxes.append(self.fix_source_flux[i])
else:
self._source_fluxes = [results[0], (results[0] * self.fix_source_flux_ratio)]
if (self.fix_blend_flux is False):
self._blend_flux = results[(- 1)]
else:
self._blend_flux = self.fix_blend_flux | Execute the linear least squares fit to determine the fitted fluxes.
Sets the values of :py:obj:`~source_fluxes`, :py:obj:`~blend_flux`,
and (if applicable) :py:obj:`~source_flux`.
Does *not* calculate chi2. To fit for the fluxes and calculate chi2,
run :py:func:`~update()`. | source/MulensModel/fitdata.py | fit_fluxes | rpoleski/MulensModel | 30 | python | def fit_fluxes(self):
'\n Execute the linear least squares fit to determine the fitted fluxes.\n Sets the values of :py:obj:`~source_fluxes`, :py:obj:`~blend_flux`,\n and (if applicable) :py:obj:`~source_flux`.\n\n Does *not* calculate chi2. To fit for the fluxes and calculate chi2,\n run :py:func:`~update()`.\n '
(xT, y) = self._setup_linalg_arrays()
try:
results = np.linalg.lstsq(xT, y, rcond=(- 1))[0]
except ValueError as e:
message = "{0}\nIf either of these numbers ({1}, {2}) is greater than zero, there is a NaN somewhere, probably in the data. The cause of this error may be the epochs with extreme brightness (e.g., 99.999 mag), which is sometimes used to mark bad data. Other possible reason is mistakenly using phot_fmt='flux' instead of 'mag'"
args = (e, np.sum(np.isnan(xT)), np.sum(np.isnan(y)))
raise ValueError(message.format(*args))
if (self.fix_source_flux_ratio is False):
if (self.fix_source_flux is False):
self._source_fluxes = results[0:self._model.n_sources]
else:
self._source_fluxes = []
index = 0
for i in range(self._model.n_sources):
if (self.fix_source_flux[i] is False):
self._source_fluxes.append(results[index])
index += 1
else:
self._source_fluxes.append(self.fix_source_flux[i])
else:
self._source_fluxes = [results[0], (results[0] * self.fix_source_flux_ratio)]
if (self.fix_blend_flux is False):
self._blend_flux = results[(- 1)]
else:
self._blend_flux = self.fix_blend_flux | def fit_fluxes(self):
'\n Execute the linear least squares fit to determine the fitted fluxes.\n Sets the values of :py:obj:`~source_fluxes`, :py:obj:`~blend_flux`,\n and (if applicable) :py:obj:`~source_flux`.\n\n Does *not* calculate chi2. To fit for the fluxes and calculate chi2,\n run :py:func:`~update()`.\n '
(xT, y) = self._setup_linalg_arrays()
try:
results = np.linalg.lstsq(xT, y, rcond=(- 1))[0]
except ValueError as e:
message = "{0}\nIf either of these numbers ({1}, {2}) is greater than zero, there is a NaN somewhere, probably in the data. The cause of this error may be the epochs with extreme brightness (e.g., 99.999 mag), which is sometimes used to mark bad data. Other possible reason is mistakenly using phot_fmt='flux' instead of 'mag'"
args = (e, np.sum(np.isnan(xT)), np.sum(np.isnan(y)))
raise ValueError(message.format(*args))
if (self.fix_source_flux_ratio is False):
if (self.fix_source_flux is False):
self._source_fluxes = results[0:self._model.n_sources]
else:
self._source_fluxes = []
index = 0
for i in range(self._model.n_sources):
if (self.fix_source_flux[i] is False):
self._source_fluxes.append(results[index])
index += 1
else:
self._source_fluxes.append(self.fix_source_flux[i])
else:
self._source_fluxes = [results[0], (results[0] * self.fix_source_flux_ratio)]
if (self.fix_blend_flux is False):
self._blend_flux = results[(- 1)]
else:
self._blend_flux = self.fix_blend_flux<|docstring|>Execute the linear least squares fit to determine the fitted fluxes.
Sets the values of :py:obj:`~source_fluxes`, :py:obj:`~blend_flux`,
and (if applicable) :py:obj:`~source_flux`.
Does *not* calculate chi2. To fit for the fluxes and calculate chi2,
run :py:func:`~update()`.<|endoftext|> |
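A final sketch, with names assumed, of fitting fluxes without evaluating chi2, using the properties this docstring references:

fit = FitData(model=my_model, dataset=my_data, fix_blend_flux=0.)
fit.fit_fluxes()
print(fit.source_fluxes, fit.blend_flux)  # blend_flux stays at the fixed 0.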