id (int64) | original (string) | modified (string) |
---|---|---|
56,345 | def distance_matrix(stream_list, shift_len=0.0,
allow_individual_trace_shifts=True, cores=1):
"""
Compute distance matrix for waveforms based on cross-correlations.
Function to compute the distance matrix for all templates - will give
distance as 1-abs(cccoh), i.e. a well correlated pair of templates will
have a small distance, and an equally well anti-correlated pair will
have the same distance as a positively correlated pair - this is a known limitation.
:type stream_list: list
:param stream_list:
List of the :class:`obspy.core.stream.Stream` to compute the distance
matrix for
:type shift_len: float
:param shift_len: How many seconds for templates to shift
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type cores: int
:param cores: Number of cores to parallel process using, defaults to 1.
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
.. warning::
Because distance is given as :math:`1-abs(coherence)`, negatively
correlated and positively correlated objects are given the same
distance.
.. note::
Requires all traces to have the same sampling rate and same length.
"""
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(stream_list))] *
len(stream_list))
shift_mat = np.zeros_like(dist_mat)
shift_mat = np.zeros([len(stream_list),
len(stream_list),
max([len(st) for st in stream_list])])
n_shifts_per_stream = 1
for i, master in enumerate(stream_list):
dist_list, shift_list = cross_chan_correlation(
st1=master, streams=stream_list, shift_len=shift_len,
allow_individual_trace_shifts=allow_individual_trace_shifts,
xcorr_func='fftw', cores=cores)
dist_mat[i] = 1 - dist_list
if allow_individual_trace_shifts:
n_shifts_per_stream = shift_list.shape[1]
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
else:
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
if shift_len == 0:
assert np.allclose(dist_mat, dist_mat.T, atol=0.00001)
# Force perfect symmetry
dist_mat = (dist_mat + dist_mat.T) / 2
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream].squeeze()
else:
# get the shortest distance for each correlation pair
dist_mat_shortest = np.minimum(dist_mat, dist_mat.T)
# Indicator says which matrix has shortest dist: value 0: mat2; 1: mat1
mat_indicator = dist_mat_shortest == dist_mat
mat_indicator = np.repeat(mat_indicator[:, :, np.newaxis],
n_shifts_per_stream, axis=2)[:, :]
# Get shift for the shortest distances
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream][:, :]
shift_mat = shift_mat * mat_indicator +\
np.transpose(shift_mat, [1, 0, 2]) * (1 - mat_indicator)
dist_mat = dist_mat_shortest
np.fill_diagonal(dist_mat, 0)
return dist_mat, shift_mat.squeeze()
| def distance_matrix(stream_list, shift_len=0.0,
allow_individual_trace_shifts=True, cores=1):
"""
Compute distance matrix for waveforms based on cross-correlations.
Function to compute the distance matrix for all templates - will give
distance as 1-abs(cccoh), i.e. a well correlated pair of templates will
have a small distance, and an equally well anti-correlated pair will
have the same distance as a positively correlated pair - this is a known limitation.
:type stream_list: list
:param stream_list:
List of the :class:`obspy.core.stream.Stream` to compute the distance
matrix for
:type shift_len: float
:param shift_len: How many seconds for templates to shift
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type cores: int
:param cores: Number of cores to parallel process using, defaults to 1.
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
.. warning::
Because distance is given as :math:`1-abs(coherence)`, negatively
correlated and positively correlated objects are given the same
distance.
.. note::
Requires all traces to have the same sampling rate and same length.
"""
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(stream_list))] *
len(stream_list))
shift_mat = np.zeros_like(dist_mat)
shift_mat = np.zeros([len(stream_list),
len(stream_list),
max([len(st) for st in stream_list])])
n_shifts_per_stream = 1
for i, master in enumerate(stream_list):
dist_list, shift_list = cross_chan_correlation(
st1=master, streams=stream_list, shift_len=shift_len,
allow_individual_trace_shifts=allow_individual_trace_shifts,
xcorr_func='fftw', cores=cores)
dist_mat[i] = 1 - dist_list
if allow_individual_trace_shifts:
n_shifts_per_stream = shift_list.shape[1]
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
else:
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
if shift_len == 0:
assert np.allclose(dist_mat, dist_mat.T, atol=0.00001)
# Force perfect symmetry
dist_mat = (dist_mat + dist_mat.T) / 2
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream].squeeze()
else:
# get the shortest distance for each correlation pair
dist_mat_shortest = np.minimum(dist_mat, dist_mat.T)
# Indicator says which matrix has shortest dist: value 0: mat2; 1: mat1
mat_indicator = dist_mat_shortest == dist_mat
mat_indicator = np.repeat(mat_indicator[:, :, np.newaxis],
n_shifts_per_stream, axis=2)[:, :]
# Get shift for the shortest distances
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream][:, :]
shift_mat = (
shift_mat * mat_indicator +
np.transpose(shift_mat, [1, 0, 2]) * (1 - mat_indicator))
dist_mat = dist_mat_shortest
np.fill_diagonal(dist_mat, 0)
return dist_mat, shift_mat.squeeze()
|
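The warning in the docstring above is easy to verify: because the distance is `1 - abs(correlation)`, a polarity-reversed copy of a template is indistinguishable from the template itself. A minimal NumPy sketch of that effect (illustrative only; it does not use the obspy-based `cross_chan_correlation` helper):

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal(200)   # synthetic "template"
b = -a                         # perfectly anti-correlated copy

dist_aa = 1 - abs(np.corrcoef(a, a)[0, 1])   # 0.0
dist_ab = 1 - abs(np.corrcoef(a, b)[0, 1])   # also 0.0 -- reversed polarity looks "identical"
print(dist_aa, dist_ab)
```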
22,176 | def render_pdf(event, positions, opt):
from PyPDF2 import PdfMerger, PdfReader, PdfWriter, Transformation
from PyPDF2.generic import RectangleObject
Renderer._register_fonts()
renderermap = {
bi.item_id: _renderer(event, bi.layout)
for bi in BadgeItem.objects.select_related('layout').filter(item__event=event)
}
try:
default_renderer = _renderer(event, event.badge_layouts.get(default=True))
except BadgeLayout.DoesNotExist:
default_renderer = None
op_renderers = [(op, renderermap.get(op.item_id, default_renderer)) for op in positions if renderermap.get(op.item_id, default_renderer)]
if not len(op_renderers):
raise OrderError(_("None of the selected products is configured to print badges."))
# render each badge on its own page first
merger = PdfMerger()
merger.add_metadata({
'/Title': 'Badges',
'/Creator': 'pretix',
})
for op, renderer in op_renderers:
buffer = BytesIO()
page = canvas.Canvas(buffer, pagesize=pagesizes.A4)
with language(op.order.locale, op.order.event.settings.region):
renderer.draw_page(page, op.order, op)
if opt['pagesize']:
page.setPageSize(opt['pagesize'])
page.save()
buffer = renderer.render_background(buffer, _('Badge'))
merger.append(ContentFile(buffer.read()))
outbuffer = BytesIO()
merger.write(outbuffer)
outbuffer.seek(0)
badges_per_page = opt['cols'] * opt['rows']
if (badges_per_page == 1):
# no need to place multiple badges on one page
return outbuffer
# place n-up badges/pages per page
badges_pdf = PdfReader(outbuffer)
nup_pdf = PdfWriter()
nup_page = None
for i, page in enumerate(badges_pdf.pages):
di = i % badges_per_page
if (di == 0):
nup_page = nup_pdf.add_blank_page(
width=opt['pagesize'][0],
height=opt['pagesize'][1],
)
tx = opt['margins'][3] + (di % opt['cols']) * opt['offsets'][0]
ty = opt['margins'][2] + (opt['rows'] - 1 - (di // opt['cols'])) * opt['offsets'][1]
page.add_transformation(Transformation().translate(tx, ty))
page.mediabox = RectangleObject((
page.mediabox.left.as_numeric() + tx,
page.mediabox.bottom.as_numeric() + ty,
page.mediabox.right.as_numeric() + tx,
page.mediabox.top.as_numeric() + ty
))
page.trimbox = page.mediabox
nup_page.merge_page(page)
outbuffer = BytesIO()
nup_pdf.write(outbuffer)
outbuffer.seek(0)
return outbuffer
| def render_pdf(event, positions, opt):
from PyPDF2 import PdfMerger, PdfReader, PdfWriter, Transformation
from PyPDF2.generic import RectangleObject
Renderer._register_fonts()
renderermap = {
bi.item_id: _renderer(event, bi.layout)
for bi in BadgeItem.objects.select_related('layout').filter(item__event=event)
}
try:
default_renderer = _renderer(event, event.badge_layouts.get(default=True))
except BadgeLayout.DoesNotExist:
default_renderer = None
op_renderers = [(op, renderermap.get(op.item_id, default_renderer)) for op in positions if renderermap.get(op.item_id, default_renderer)]
if not len(op_renderers):
raise OrderError(_("None of the selected products is configured to print badges."))
# render each badge on its own page first
merger = PdfMerger()
merger.add_metadata({
'/Title': 'Badges',
'/Creator': 'pretix',
})
for op, renderer in op_renderers:
buffer = BytesIO()
page = canvas.Canvas(buffer, pagesize=pagesizes.A4)
with language(op.order.locale, op.order.event.settings.region):
renderer.draw_page(page, op.order, op)
if opt['pagesize']:
page.setPageSize(opt['pagesize'])
page.save()
buffer = renderer.render_background(buffer, _('Badge'))
merger.append(ContentFile(buffer.read()))
outbuffer = BytesIO()
merger.write(outbuffer)
outbuffer.seek(0)
badges_per_page = opt['cols'] * opt['rows']
if badges_per_page == 1:
# no need to place multiple badges on one page
return outbuffer
# place n-up badges/pages per page
badges_pdf = PdfReader(outbuffer)
nup_pdf = PdfWriter()
nup_page = None
for i, page in enumerate(badges_pdf.pages):
di = i % badges_per_page
if (di == 0):
nup_page = nup_pdf.add_blank_page(
width=opt['pagesize'][0],
height=opt['pagesize'][1],
)
tx = opt['margins'][3] + (di % opt['cols']) * opt['offsets'][0]
ty = opt['margins'][2] + (opt['rows'] - 1 - (di // opt['cols'])) * opt['offsets'][1]
page.add_transformation(Transformation().translate(tx, ty))
page.mediabox = RectangleObject((
page.mediabox.left.as_numeric() + tx,
page.mediabox.bottom.as_numeric() + ty,
page.mediabox.right.as_numeric() + tx,
page.mediabox.top.as_numeric() + ty
))
page.trimbox = page.mediabox
nup_page.merge_page(page)
outbuffer = BytesIO()
nup_pdf.write(outbuffer)
outbuffer.seek(0)
return outbuffer
|
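The n-up placement in `render_pdf` above is plain grid arithmetic: each badge on a sheet gets a translation computed from the page margins, its column/row index, and the per-badge offsets. A small standalone sketch of that arithmetic, with hypothetical values (the keys mirror the `opt` dict; the numbers are made up):

```python
# Hypothetical layout: 2 columns x 4 rows of badges per sheet (values in points).
opt = {'cols': 2, 'rows': 4, 'margins': [15, 15, 15, 15], 'offsets': [280, 200]}

for i in range(opt['cols'] * opt['rows']):
    # Same translation formula as in render_pdf: left margin + column step,
    # bottom margin + row step counted from the top row down.
    tx = opt['margins'][3] + (i % opt['cols']) * opt['offsets'][0]
    ty = opt['margins'][2] + (opt['rows'] - 1 - (i // opt['cols'])) * opt['offsets'][1]
    print(f"badge {i}: translate by ({tx}, {ty}) points")
```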
57,302 | def _maybe_call_security_loader(address):
security_loader = dask.config.get("distributed.client.security-loader")
if security_loader:
try:
security_loader = import_term(security_loader)
except Exception as exc:
raise ImportError(
f"Failed to import `{security_loader}` configured at "
f"`distributed.client.security-loader` - is this module "
f"installed?"
) from exc
return security_loader({"address": address})
return None
| def _maybe_call_security_loader(address):
security_loader_term = dask.config.get("distributed.client.security-loader")
if security_loader_term:
try:
security_loader = import_term(security_loader)
except Exception as exc:
raise ImportError(
f"Failed to import `{security_loader}` configured at "
f"`distributed.client.security-loader` - is this module "
f"installed?"
) from exc
return security_loader({"address": address})
return None
|
45,813 | def load_image(file_name):
"""Loads the image with OpenCV and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise AssertionError("Invalid file {}".format(file_name))
# load image with OpenCV
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
# convert image to torch tensor
tensor = tgm.utils.image_to_tensor(img).float() / 255.0
return tensor.view(1, *tensor.shape) # 1xCxHxW
| def load_image(file_name):
"""Loads the image with OpenCV and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise FileExistsError("Invalid file {}".format(file_name))
# load image with OpenCV
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
# convert image to torch tensor
tensor = tgm.utils.image_to_tensor(img).float() / 255.0
return tensor.view(1, *tensor.shape) # 1xCxHxW
|
35,162 | def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
callback=None, atol=None, callback_type=None):
"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.
Args:
A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex
matrix of the linear system with shape ``(n, n)``.
b (cupy.ndarray): Right hand side of the linear system with shape
``(n,)`` or ``(n, 1)``.
x0 (cupy.ndarray): Starting guess for the solution.
tol (float): Tolerance for convergence.
restart (int): Number of iterations between restarts. Larger values
increase iteration cost, but may be necessary for convergence.
maxiter (int): Maximum number of iterations.
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for
``A``. The preconditioner should approximate the inverse of ``A``.
callback (function): User-specified function to call on every restart.
It is called as ``callback(arg)``, where ``arg`` is selected by
``callback_type``.
callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
vector is used as an argument of callback function. if `pr_norm`,
relative (preconditioned) residual norm is used as an argument.
atol (float): Tolerance for convergence.
Returns:
tuple:
It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
the converged solution and ``info`` provides convergence
information.
Reference:
M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).
.. seealso:: :func:`scipy.sparse.linalg.gmres`
"""
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(A.shape))
if A.dtype.char not in 'fdFD':
raise TypeError('unsupported dtype (actual: {})'.format(A.dtype))
n = A.shape[0]
if not (b.shape == (n,) or b.shape == (n, 1)):
raise ValueError('b has incompatible dimensions')
b = b.astype(A.dtype).ravel()
if n == 0:
return cupy.empty_like(b), 0
b_norm = cupy.linalg.norm(b)
if b_norm == 0:
return b, 0
if atol is None:
atol = tol * float(b_norm)
else:
atol = max(float(atol), tol * float(b_norm))
if x0 is None:
x = cupy.zeros((n,), dtype=A.dtype)
else:
if not (x0.shape == (n,) or x0.shape == (n, 1)):
raise ValueError('x0 has incompatible dimensions')
x = x0.astype(A.dtype).ravel()
if maxiter is None:
maxiter = n * 10
if restart is None:
restart = 20
restart = min(restart, n)
if callback_type is None:
callback_type = 'pr_norm'
if callback_type not in ('x', 'pr_norm'):
raise ValueError('Unknown callback_type: {}'.format(callback_type))
if callback is None:
callback_type = None
V = cupy.empty((n, restart), dtype=A.dtype, order='F')
H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
e = numpy.zeros((restart+1,), dtype=A.dtype)
matvec, psolve = _make_funcs(A, M)
compute_hu = _make_compute_hu(V)
iters = 0
while True:
mx = psolve(x)
r = b - matvec(mx)
r_norm = cublas.nrm2(r)
if callback_type == 'x':
callback(mx)
elif callback_type == 'pr_norm' and iters > 0:
callback(r_norm / b_norm)
if r_norm <= atol or iters >= maxiter:
break
v = r / r_norm
V[:, 0] = v
e[0] = r_norm
# Arnoldi iteration
for j in range(restart):
z = psolve(v)
u = matvec(z)
H[:j+1, j], u = compute_hu(u, j)
cublas.nrm2(u, out=H[j+1, j])
if j+1 < restart:
v = u / H[j+1, j]
V[:, j+1] = v
# Note: The least-squares solution to equation Hy = e is computed on CPU
# because it is faster if the matrix size is small.
ret = scipy.linalg.lstsq(cupy.asnumpy(H), e)
y = cupy.array(ret[0])
x += V @ y
iters += restart
info = 0
if iters == maxiter and not (r_norm <= atol):
info = iters
return mx, info
| def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
callback=None, atol=None, callback_type=None):
"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.
Args:
A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex
matrix of the linear system with shape ``(n, n)``.
b (cupy.ndarray): Right hand side of the linear system with shape
``(n,)`` or ``(n, 1)``.
x0 (cupy.ndarray): Starting guess for the solution.
tol (float): Tolerance for convergence.
restart (int): Number of iterations between restarts. Larger values
increase iteration cost, but may be necessary for convergence.
maxiter (int): Maximum number of iterations.
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for
``A``. The preconditioner should approximate the inverse of ``A``.
callback (function): User-specified function to call on every restart.
It is called as ``callback(arg)``, where ``arg`` is selected by
``callback_type``.
callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
vector is used as an argument of callback function. if 'pr_norm',
relative (preconditioned) residual norm is used as an argument.
atol (float): Tolerance for convergence.
Returns:
tuple:
It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
the converged solution and ``info`` provides convergence
information.
Reference:
M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).
.. seealso:: :func:`scipy.sparse.linalg.gmres`
"""
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(A.shape))
if A.dtype.char not in 'fdFD':
raise TypeError('unsupported dtype (actual: {})'.format(A.dtype))
n = A.shape[0]
if not (b.shape == (n,) or b.shape == (n, 1)):
raise ValueError('b has incompatible dimensions')
b = b.astype(A.dtype).ravel()
if n == 0:
return cupy.empty_like(b), 0
b_norm = cupy.linalg.norm(b)
if b_norm == 0:
return b, 0
if atol is None:
atol = tol * float(b_norm)
else:
atol = max(float(atol), tol * float(b_norm))
if x0 is None:
x = cupy.zeros((n,), dtype=A.dtype)
else:
if not (x0.shape == (n,) or x0.shape == (n, 1)):
raise ValueError('x0 has incompatible dimensions')
x = x0.astype(A.dtype).ravel()
if maxiter is None:
maxiter = n * 10
if restart is None:
restart = 20
restart = min(restart, n)
if callback_type is None:
callback_type = 'pr_norm'
if callback_type not in ('x', 'pr_norm'):
raise ValueError('Unknown callback_type: {}'.format(callback_type))
if callback is None:
callback_type = None
V = cupy.empty((n, restart), dtype=A.dtype, order='F')
H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
e = numpy.zeros((restart+1,), dtype=A.dtype)
matvec, psolve = _make_funcs(A, M)
compute_hu = _make_compute_hu(V)
iters = 0
while True:
mx = psolve(x)
r = b - matvec(mx)
r_norm = cublas.nrm2(r)
if callback_type == 'x':
callback(mx)
elif callback_type == 'pr_norm' and iters > 0:
callback(r_norm / b_norm)
if r_norm <= atol or iters >= maxiter:
break
v = r / r_norm
V[:, 0] = v
e[0] = r_norm
# Arnoldi iteration
for j in range(restart):
z = psolve(v)
u = matvec(z)
H[:j+1, j], u = compute_hu(u, j)
cublas.nrm2(u, out=H[j+1, j])
if j+1 < restart:
v = u / H[j+1, j]
V[:, j+1] = v
# Note: The least-squares solution to equation Hy = e is computed on CPU
# because it is faster if the matrix size is small.
ret = scipy.linalg.lstsq(cupy.asnumpy(H), e)
y = cupy.array(ret[0])
x += V @ y
iters += restart
info = 0
if iters == maxiter and not (r_norm <= atol):
info = iters
return mx, info
|
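For context on the stopping rule in the `gmres` implementations above: the effective absolute threshold is `tol * ||b||` when `atol` is not given, otherwise `max(atol, tol * ||b||)`. A small CPU sketch using the SciPy reference named in the docstring (assumptions: a trivially conditioned toy system so it converges within one restart; tolerance keywords are left at their defaults because their spelling differs across SciPy versions):

```python
import numpy as np
from scipy.sparse import identity
from scipy.sparse.linalg import gmres  # CPU reference cited in the docstring

n = 50
A = identity(n, format='csr') * 4.0    # well-conditioned toy system
b = np.ones(n)

# Effective absolute threshold, mirroring the logic in the code above.
tol, atol = 1e-5, None
b_norm = np.linalg.norm(b)
threshold = tol * b_norm if atol is None else max(float(atol), tol * b_norm)

x, info = gmres(A, b, restart=20, maxiter=500)
print(info, np.linalg.norm(b - A @ x) <= threshold)   # 0, True
```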
38,738 | def detect_topology():
rt = runtime()
detect_remote_systems = rt.get_option('general/0/remote_detect')
topo_prefix = os.path.join(os.getenv('HOME'), '.reframe/topology')
for part in rt.system.partitions:
getlogger().debug(f'detecting topology info for {part.fullname}')
found_procinfo = False
found_devinfo = False
if part.processor.info != {}:
# Processor info set up already in the configuration
getlogger().debug(
f'> topology found in configuration file; skipping...'
)
found_procinfo = True
if part.devices:
# Devices set up already in the configuration
getlogger().debug(
f'> devices found in configuration file; skipping...'
)
found_devinfo = True
if found_procinfo and found_devinfo:
continue
topo_file = os.path.join(
topo_prefix, f'{rt.system.name}-{part.name}', 'processor.json'
)
dev_file = os.path.join(
topo_prefix, f'{rt.system.name}-{part.name}', 'devices.json'
)
if not found_procinfo and os.path.exists(topo_file):
getlogger().debug(
f'> found topology file {topo_file!r}; loading...'
)
try:
part.processor._info = _load_info(
topo_file, _subschema('#/defs/processor_info')
)
found_procinfo = True
except json.decoder.JSONDecodeError:
getlogger().debug(
f'> could not load {topo_file!r}; ignoring...'
)
if not found_devinfo and os.path.exists(dev_file):
getlogger().debug(
f'> found devices file {dev_file!r}; loading...'
)
try:
part._devices = _load_info(
dev_file, _subschema('#/defs/devices')
)
found_devinfo = True
except json.decoder.JSONDecodeError:
getlogger().debug(
f'> could not load {dev_file!r}; ignoring...'
)
if found_procinfo and found_devinfo:
continue
if not found_procinfo:
# No topology found, try to auto-detect it
getlogger().debug(f'> no topology file found; auto-detecting...')
if _is_part_local(part):
# Unconditionally detect the system for fully local partitions
part.processor._info = cpuinfo()
_save_info(topo_file, part.processor.info)
elif detect_remote_systems:
part.processor._info = _remote_detect(part)
if part.processor.info:
_save_info(topo_file, part.processor.info)
getlogger().debug(f'> saved topology in {topo_file!r}')
if not found_devinfo:
getlogger().debug(f'> device auto-detection is not supported')
| def detect_topology():
rt = runtime()
detect_remote_systems = rt.get_option('general/0/remote_detect')
topo_prefix = os.path.join(os.getenv('HOME'), '.reframe/topology')
for part in rt.system.partitions:
getlogger().debug(f'detecting topology info for {part.fullname}')
found_procinfo = False
found_devinfo = False
if part.processor.info != {}:
# Processor info set up already in the configuration
getlogger().debug(
f'> topology found in configuration file; skipping...'
)
found_procinfo = True
if part.devices:
# Devices set up already in the configuration
getlogger().debug(
f'> devices found in configuration file; skipping...'
)
found_devinfo = True
if found_procinfo and found_devinfo:
continue
topo_file = os.path.join(
topo_prefix, f'{rt.system.name}-{part.name}', 'processor.json'
)
dev_file = os.path.join(
topo_prefix, f'{rt.system.name}-{part.name}', 'devices.json'
)
if not found_procinfo and os.path.exists(topo_file):
getlogger().debug(
f'> found topology file {topo_file!r}; loading...'
)
try:
part.processor._info = _load_info(
topo_file, _subschema('#/defs/processor_info')
)
found_procinfo = True
except json.decoder.JSONDecodeError as e:
getlogger().debug(
f'> could not load {topo_file!r}; ignoring...'
)
if not found_devinfo and os.path.exists(dev_file):
getlogger().debug(
f'> found devices file {dev_file!r}; loading...'
)
try:
part._devices = _load_info(
dev_file, _subschema('#/defs/devices')
)
found_devinfo = True
except json.decoder.JSONDecodeError:
getlogger().debug(
f'> could not load {dev_file!r}; ignoring...'
)
if found_procinfo and found_devinfo:
continue
if not found_procinfo:
# No topology found, try to auto-detect it
getlogger().debug(f'> no topology file found; auto-detecting...')
if _is_part_local(part):
# Unconditionally detect the system for fully local partitions
part.processor._info = cpuinfo()
_save_info(topo_file, part.processor.info)
elif detect_remote_systems:
part.processor._info = _remote_detect(part)
if part.processor.info:
_save_info(topo_file, part.processor.info)
getlogger().debug(f'> saved topology in {topo_file!r}')
if not found_devinfo:
getlogger().debug(f'> device auto-detection is not supported')
|
36,274 | def _stats_from_measurements(bs_results: np.ndarray, qubit_index_map: Dict,
setting: ExperimentSetting, n_shots: int = None,
coeff: float = 1.0) -> Tuple[float, float]:
"""
:param bs_results: results from running `qc.run`
:param qubit_index_map: dict mapping qubit to classical register index
:param setting: ExperimentSetting
:param n_shots: number of shots in the measurement process
:return: tuple specifying (mean, variance)
"""
if n_shots is not None:
warnings.warn("The argument n_shots will be deprecated in favor of len(bs_results).",
DeprecationWarning)
else:
n_shots = len(bs_results)
obs = setting.out_operator
# Identify classical register indices to select
idxs = [qubit_index_map[q] for q, _ in obs]
if len(idxs) == 0: # identity term
return coeff, 0
# Pick columns corresponding to qubits with a non-identity out_operation
obs_strings = bs_results[:, idxs]
# Transform bits to eigenvalues; ie (+1, -1)
my_obs_strings = 1 - 2 * obs_strings
# Multiply row-wise to get operator values. Do statistics. Return result.
obs_vals = coeff * np.prod(my_obs_strings, axis=1)
obs_mean = np.mean(obs_vals)
obs_var = np.var(obs_vals) / n_shots
return obs_mean, obs_var
| def _stats_from_measurements(bs_results: np.ndarray, qubit_index_map: Dict,
setting: ExperimentSetting, n_shots: Optional[int] = None,
coeff: float = 1.0) -> Tuple[float, float]:
"""
:param bs_results: results from running `qc.run`
:param qubit_index_map: dict mapping qubit to classical register index
:param setting: ExperimentSetting
:param n_shots: number of shots in the measurement process
:return: tuple specifying (mean, variance)
"""
if n_shots is not None:
warnings.warn("The argument n_shots will be deprecated in favor of len(bs_results).",
DeprecationWarning)
else:
n_shots = len(bs_results)
obs = setting.out_operator
# Identify classical register indices to select
idxs = [qubit_index_map[q] for q, _ in obs]
if len(idxs) == 0: # identity term
return coeff, 0
# Pick columns corresponding to qubits with a non-identity out_operation
obs_strings = bs_results[:, idxs]
# Transform bits to eigenvalues; ie (+1, -1)
my_obs_strings = 1 - 2 * obs_strings
# Multiply row-wise to get operator values. Do statistics. Return result.
obs_vals = coeff * np.prod(my_obs_strings, axis=1)
obs_mean = np.mean(obs_vals)
obs_var = np.var(obs_vals) / n_shots
return obs_mean, obs_var
|
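The bit-to-eigenvalue mapping used above (`1 - 2 * bit`) and the per-shot product are easy to reproduce on a toy bitstring array. This sketch assumes a made-up 4-shot, 2-qubit result and a coefficient of 1:

```python
import numpy as np

bs_results = np.array([[0, 0], [0, 1], [1, 1], [0, 0]])  # hypothetical shots

eigvals = 1 - 2 * bs_results              # bit 0 -> +1, bit 1 -> -1
obs_vals = np.prod(eigvals, axis=1)       # per-shot value of the observable
mean = np.mean(obs_vals)                  # 0.5
var = np.var(obs_vals) / len(bs_results)  # variance of the mean estimate
print(obs_vals, mean, var)
```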
7,086 | def test_detect_old_contact_file_already_removed(
workflow,
monkeypatch,
caplog,
log_filter,
):
"""It should not error if the contact file is removed by something else.
E.G:
* detect_old_contact_file in another client.
* cylc clean.
* Aliens.
"""
def _is_process_running(*args):
nonlocal workflow
workflow.contact_file.unlink()
return False
# replace the process check with something that removes the contact file
# (this simulates the contact file being removed whilst the process check
# is running as this could take a while)
monkeypatch.setattr(
'cylc.flow.workflow_files._is_process_running',
_is_process_running,
)
caplog.set_level(logging.INFO, logger=CYLC_LOG)
# try to remove the contact file
# (this shoud not error if the contact file does not exist)
detect_old_contact_file(workflow.reg)
assert log_filter(caplog, contains='Removing contact file')
| def test_detect_old_contact_file_already_removed(
workflow,
monkeypatch,
caplog,
log_filter,
):
"""It should not error if the contact file is removed by something else.
E.G:
* detect_old_contact_file in another client.
* cylc clean.
* Aliens.
"""
def _is_process_running(*args):
nonlocal workflow
workflow.contact_file.unlink()
return False
# replace the process check with something that removes the contact file
# (this simulates the contact file being removed whilst the process check
# is running as this could take a while)
monkeypatch.setattr(
'cylc.flow.workflow_files._is_process_running',
_is_process_running,
)
caplog.set_level(logging.INFO, logger=CYLC_LOG)
# try to remove the contact file
# (this should not error if the contact file does not exist)
detect_old_contact_file(workflow.reg)
assert log_filter(caplog, contains='Removing contact file')
|
17,451 | def _cov_corr(da_a, da_b, dim=None, ddof=0, method=None):
"""
Internal method for xr.cov() and xr.corr() so only have to
sanitize the input arrays once and we don't repeat code.
"""
# 1. Broadcast the two arrays
da_a, da_b = align(da_a, da_b, join="inner", copy=False)
# 2. Ignore the nans
valid_values = da_a.notnull() & da_b.notnull()
valid_count = valid_values.sum(dim) - ddof
def _get_valid_values(da, other):
"""
Function to lazily mask da_a and da_b
following a similar approach to
https://github.com/pydata/xarray/pull/4559
"""
missing_vals = np.logical_or(da.isnull(), other.isnull())
if missing_vals.any():
da = da.where(~missing_vals)
return da
else:
return da
da_a = da_a.map_blocks(_get_valid_values, args=[da_b])
da_b = da_b.map_blocks(_get_valid_values, args=[da_a])
# 3. Detrend along the given dim
# https://github.com/pydata/xarray/issues/4804#issuecomment-760114285
demeaned_da_ab = (da_a * da_b) - (da_a.mean(dim=dim) * da_b.mean(dim=dim))
# 4. Compute covariance along the given dim
# N.B. `skipna=False` is required or there is a bug when computing
# auto-covariance. E.g. Try xr.cov(da,da) for
# da = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"])
cov = demeaned_da_ab.sum(dim=dim, skipna=True, min_count=1) / (valid_count)
if method == "cov":
return cov
else:
# compute std + corr
da_a_std = da_a.std(dim=dim)
da_b_std = da_b.std(dim=dim)
corr = cov / (da_a_std * da_b_std)
return corr
| def _cov_corr(da_a, da_b, dim=None, ddof=0, method=None):
"""
Internal method for xr.cov() and xr.corr() so only have to
sanitize the input arrays once and we don't repeat code.
"""
# 1. Broadcast the two arrays
da_a, da_b = align(da_a, da_b, join="inner", copy=False)
# 2. Ignore the nans
valid_values = da_a.notnull() & da_b.notnull()
valid_count = valid_values.sum(dim) - ddof
def _get_valid_values(da, other):
"""
Function to lazily mask da_a and da_b
following a similar approach to
https://github.com/pydata/xarray/pull/4559
"""
missing_vals = np.logical_or(da.isnull(), other.isnull())
if missing_vals.any():
da = da.where(~missing_vals)
return da
else:
return da
da_a = da_a.map_blocks(_get_valid_values, args=[da_b])
da_b = da_b.map_blocks(_get_valid_values, args=[da_a])
# 3. Detrend along the given dim
# https://github.com/pydata/xarray/issues/4804#issuecomment-760114285
demeaned_da_ab = (da_a * da_b).mean(dim=dim) - da_a.mean(dim=dim) * da_b.mean(dim=dim)
# 4. Compute covariance along the given dim
# N.B. `skipna=False` is required or there is a bug when computing
# auto-covariance. E.g. Try xr.cov(da,da) for
# da = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"])
cov = demeaned_da_ab.sum(dim=dim, skipna=True, min_count=1) / (valid_count)
if method == "cov":
return cov
else:
# compute std + corr
da_a_std = da_a.std(dim=dim)
da_b_std = da_b.std(dim=dim)
corr = cov / (da_a_std * da_b_std)
return corr
|
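Both variants above lean on the identity cov(a, b) = E[ab] - E[a]E[b] (with ddof=0). A quick NumPy check of that identity on dense data, ignoring the NaN-masking and `map_blocks` machinery:

```python
import numpy as np

a = np.array([1.0, 2.0, 4.0, 7.0])
b = np.array([2.0, 1.0, 3.0, 5.0])

cov_identity = np.mean(a * b) - np.mean(a) * np.mean(b)  # E[ab] - E[a]E[b]
cov_numpy = np.cov(a, b, ddof=0)[0, 1]
print(np.isclose(cov_identity, cov_numpy))  # True
```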
20,535 | def generate_qc(fname_in1, fname_in2=None, fname_seg=None, angle_line=None, args=None, path_qc=None, fps=None,
dataset=None, subject=None, path_img=None, process=None):
"""
Generate a QC entry allowing to quickly review results. This function is the entry point and is called by SCT
scripts (e.g. sct_propseg).
:param fname_in1: str: File name of input image #1 (mandatory)
:param fname_in2: str: File name of input image #2
:param fname_seg: str: File name of input segmentation
:param angle_line: list: Angle [in rad, wrt. vertical line, must be between -pi and pi] to apply to the line overlaid on the image, for\
each slice; for slices that don't have an angle to display, a nan is expected. To be used for assessing cord orientation.
:param args: args from parent function
:param path_qc: str: Path to save QC report
:param fps: float: Frame rate for output gif images
:param dataset: str: Dataset name
:param subject: str: Subject name
:param path_img: dict: Path to image to display (e.g., a graph), instead of computing the image from MRI.
:param process: str: Name of SCT function. e.g., sct_propseg
:return: None
"""
logger.info('\n*** Generate Quality Control (QC) html report ***')
dpi = 300
plane = None
qcslice_type = None
qcslice_operations = None
qcslice_layout = None
# Get QC specifics based on SCT process
# Axial orientation, switch between two input images
if process in ['sct_register_multimodal', 'sct_register_to_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_in2), Image(fname_seg)])
qcslice_operations = [QcImage.no_seg_seg]
def qcslice_layout(x): return x.mosaic()[:2]
# Rotation visualisation
elif process in ['rotation']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.line_angle]
def qcslice_layout(x): return x.mosaic(return_center=True)
# Axial orientation, switch between the image and the segmentation
elif process in ['sct_propseg', 'sct_deepseg_sc', 'sct_deepseg_gm']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.listed_seg]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between the image and the centerline
elif process in ['sct_get_centerline']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.label_centerline]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between the image and the white matter segmentation (linear interp, in blue)
elif process in ['sct_warp_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.template]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between gif image (before and after motion correction) and grid overlay
elif process in ['sct_dmri_moco', 'sct_fmri_moco']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_in2), Image(fname_seg)])
qcslice_operations = [QcImage.grid] # grid will be added in future PR
def qcslice_layout(x): return x.mosaic_through_time() # mosaic_through_time will be added in future PR
# Sagittal orientation, display vertebral labels
elif process in ['sct_label_vertebrae']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.label_vertebrae]
def qcslice_layout(x): return x.single()
# Sagittal orientation, display posterior labels
elif process in ['sct_label_utils']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
# projected_image = projected(Image(fname_seg))
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.label_utils]
def qcslice_layout(x): return x.single()
# Sagittal orientation, display PMJ box
elif process in ['sct_detect_pmj']:
plane = 'Sagittal'
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.highlight_pmj]
def qcslice_layout(x): return x.single()
# Sagittal orientation, static image
elif process in ['sct_straighten_spinalcord']:
plane = 'Sagittal'
dpi = 100
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_in1)], p_resample=None)
qcslice_operations = [QcImage.vertical_line]
def qcslice_layout(x): return x.single()
# Metric outputs (only graphs)
elif process in ['sct_process_segmentation']:
assert os.path.isfile(path_img)
else:
raise ValueError("Unrecognized process: {}".format(process))
add_entry(
src=fname_in1,
process=process,
args=args,
path_qc=path_qc,
dataset=dataset,
subject=subject,
plane=plane,
path_img=path_img,
dpi=dpi,
qcslice=qcslice_type,
qcslice_operations=qcslice_operations,
qcslice_layout=qcslice_layout,
stretch_contrast_method='equalized',
angle_line=angle_line,
fps=fps,
)
| def generate_qc(fname_in1, fname_in2=None, fname_seg=None, angle_line=None, args=None, path_qc=None, fps=None,
dataset=None, subject=None, path_img=None, process=None):
"""
Generate a QC entry allowing to quickly review results. This function is the entry point and is called by SCT
scripts (e.g. sct_propseg).
:param fname_in1: str: File name of input image #1 (mandatory)
:param fname_in2: str: File name of input image #2
:param fname_seg: str: File name of input segmentation
:param angle_line: list: Angle [in rad, wrt. vertical line, must be between -pi and pi] to apply to the line overlaid on the image, for\
each slice; for slices that don't have an angle to display, a nan is expected. To be used for assessing cord orientation.
:param args: args from parent function
:param path_qc: str: Path to save QC report
:param fps: float: Number of frames per second for output gif images
:param dataset: str: Dataset name
:param subject: str: Subject name
:param path_img: dict: Path to image to display (e.g., a graph), instead of computing the image from MRI.
:param process: str: Name of SCT function. e.g., sct_propseg
:return: None
"""
logger.info('\n*** Generate Quality Control (QC) html report ***')
dpi = 300
plane = None
qcslice_type = None
qcslice_operations = None
qcslice_layout = None
# Get QC specifics based on SCT process
# Axial orientation, switch between two input images
if process in ['sct_register_multimodal', 'sct_register_to_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_in2), Image(fname_seg)])
qcslice_operations = [QcImage.no_seg_seg]
def qcslice_layout(x): return x.mosaic()[:2]
# Rotation visualisation
elif process in ['rotation']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.line_angle]
def qcslice_layout(x): return x.mosaic(return_center=True)
# Axial orientation, switch between the image and the segmentation
elif process in ['sct_propseg', 'sct_deepseg_sc', 'sct_deepseg_gm']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.listed_seg]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between the image and the centerline
elif process in ['sct_get_centerline']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.label_centerline]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between the image and the white matter segmentation (linear interp, in blue)
elif process in ['sct_warp_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.template]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between gif image (before and after motion correction) and grid overlay
elif process in ['sct_dmri_moco', 'sct_fmri_moco']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_in2), Image(fname_seg)])
qcslice_operations = [QcImage.grid] # grid will be added in future PR
def qcslice_layout(x): return x.mosaic_through_time() # mosaic_through_time will be added in future PR
# Sagittal orientation, display vertebral labels
elif process in ['sct_label_vertebrae']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.label_vertebrae]
def qcslice_layout(x): return x.single()
# Sagittal orientation, display posterior labels
elif process in ['sct_label_utils']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
# projected_image = projected(Image(fname_seg))
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.label_utils]
def qcslice_layout(x): return x.single()
# Sagittal orientation, display PMJ box
elif process in ['sct_detect_pmj']:
plane = 'Sagittal'
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.highlight_pmj]
def qcslice_layout(x): return x.single()
# Sagittal orientation, static image
elif process in ['sct_straighten_spinalcord']:
plane = 'Sagittal'
dpi = 100
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_in1)], p_resample=None)
qcslice_operations = [QcImage.vertical_line]
def qcslice_layout(x): return x.single()
# Metric outputs (only graphs)
elif process in ['sct_process_segmentation']:
assert os.path.isfile(path_img)
else:
raise ValueError("Unrecognized process: {}".format(process))
add_entry(
src=fname_in1,
process=process,
args=args,
path_qc=path_qc,
dataset=dataset,
subject=subject,
plane=plane,
path_img=path_img,
dpi=dpi,
qcslice=qcslice_type,
qcslice_operations=qcslice_operations,
qcslice_layout=qcslice_layout,
stretch_contrast_method='equalized',
angle_line=angle_line,
fps=fps,
)
|
47,948 | def serializer_identifier(identifier):
if isinstance(identifier, ClipIdentifier):
return {
"type": "clip_identifier",
"video": identifier.video,
"clip_id": identifier.clip_id,
"frames": identifier.frames
}
if isinstance(identifier, MultiFramesInputIdentifier):
return {
"type": "multi_frame_identifier",
"input_id": identifier.input_id,
"frames": identifier.frames
}
if isinstance(identifier, ImagePairIdentifier):
return {
"type": "image_pair_identifier",
"first": identifier.first,
"second": identifier.second
}
if isinstance(identifier, ListIdentifier):
return {
"type": "list_identifier",
"values": identifier.values
}
return identifier
| def serialize_identifier(identifier):
if isinstance(identifier, ClipIdentifier):
return {
"type": "clip_identifier",
"video": identifier.video,
"clip_id": identifier.clip_id,
"frames": identifier.frames
}
if isinstance(identifier, MultiFramesInputIdentifier):
return {
"type": "multi_frame_identifier",
"input_id": identifier.input_id,
"frames": identifier.frames
}
if isinstance(identifier, ImagePairIdentifier):
return {
"type": "image_pair_identifier",
"first": identifier.first,
"second": identifier.second
}
if isinstance(identifier, ListIdentifier):
return {
"type": "list_identifier",
"values": identifier.values
}
return identifier
|
40,130 | def dnf_remove_packages(*packages: str):
'''
Remove packages from Fedora / RedHat / Cent systems.
:param packages: Iterable containing packages to remove.
'''
log_current_packages(packages, install=False)
return _run_shell_command_raise_on_return_code(f"sudo dnf remove -y {' '.join(packages)}", f"Error in removal of package(s) {' '.join(packages)}", True)
| def dnf_remove_packages(*packages: str):
'''
Remove packages from Fedora / RedHat / Cent systems.
:param packages: Iterable containing packages to remove.
'''
return _run_shell_command_raise_on_return_code(f'sudo dnf remove -y {" ".join(packages)}', f'Error in removal of package(s) {" ".join(packages)}', True)
return _run_shell_command_raise_on_return_code(f"sudo dnf remove -y {' '.join(packages)}", f"Error in removal of package(s) {' '.join(packages)}", True)
|
54,316 | def convertRadisToJSON():
"""Converts the ~/.radis file into json formatted file ~/.radisjson
Example
-------
original ~/.radis file format
[HITRAN-CO2-TEST]
info = HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)
path = PATH_TO\radis\radis\test\files\hitran_co2_626_bandhead_4165_4200nm.par
format = hitran
parfuncfmt = hapi
levelsfmt = radis
-----------
Converted ~/.radisjson file format
{"HITRAN-CO2-TEST": {"info": "HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)",
"path": "PATH_TO\\radis\\radis\\test\\files\\hitran_co2_626_bandhead_4165_4200nm.par",
"format": "hitran",
"parfuncfmt": "hapi",
"levelsfmt": "radis"}}
"""
# Loads configuration file ~/.radis
config = getConfig()
# Variable to store data in JSON format
config_json = {}
# Converting configuration into JSON format and storing in config_json variable
for i in config.sections():
temp = {}
for j in config[i]:
temp[j] = config[i][j]
config_json[i] = temp
# Creating json file
config_json_dir = CONFIG_PATH_JSON
with open(config_json_dir, "w") as outfile:
json.dump(config_json, outfile, indent=2)
outfile.close()
return
| def convertRadisToJSON():
"""Converts the ~/.radis file into json formatted file ~/.radisjson
Example
-------
original ~/.radis file format
[HITRAN-CO2-TEST]
info = HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)
path = PATH_TO\radis\radis\test\files\hitran_co2_626_bandhead_4165_4200nm.par
format = hitran
parfuncfmt = hapi
levelsfmt = radis
-----------
Converted ~/.radisjson file format::
{"HITRAN-CO2-TEST": {"info": "HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)",
"path": "PATH_TO\\radis\\radis\\test\\files\\hitran_co2_626_bandhead_4165_4200nm.par",
"format": "hitran",
"parfuncfmt": "hapi",
"levelsfmt": "radis"}}
"""
# Loads configuration file ~/.radis
config = getConfig()
# Variable to store data in JSON format
config_json = {}
# Converting configuration into JSON format and storing in config_json variable
for i in config.sections():
temp = {}
for j in config[i]:
temp[j] = config[i][j]
config_json[i] = temp
# Creating json file
config_json_dir = CONFIG_PATH_JSON
with open(config_json_dir, "w") as outfile:
json.dump(config_json, outfile, indent=2)
outfile.close()
return
|
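The conversion above is a plain INI-to-JSON translation via nested dicts. A standalone sketch with the standard-library `configparser` (the section content and the output filename here are made up; the real function reads `~/.radis` via `getConfig()`):

```python
import configparser
import json

config = configparser.ConfigParser()
config.read_string("""
[HITRAN-CO2-TEST]
info = HITRAN 2016 database, CO2, 1 main isotope (CO2-626)
format = hitran
parfuncfmt = hapi
""")

# Same section -> {key: value} flattening as convertRadisToJSON
config_json = {section: dict(config[section]) for section in config.sections()}

with open("radis_config_example.json", "w") as outfile:
    json.dump(config_json, outfile, indent=2)
```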
41,694 | def possibly_skip_test(request, info: dict[str, Any]) -> dict[str, Any]:
for reason in (
reason for [flag, reason] in info.items() if flag.startswith("segfault")
):
pytest.skip(f"known segfault: {reason}")
for reason in (
reason for [flag, reason] in info.items() if flag.startswith("xfail")
):
if request.config.option.run_xfail:
request.applymarker(
pytest.mark.xfail(
run=False,
reason=f"known failure: {reason}",
)
)
else:
pytest.xfail(f"known failure: {reason}")
return info
| def possibly_skip_test(request, info: dict[str, Any]) -> dict[str, Any]:
for reason in (
reason for (flag, reason) in info.items() if flag.startswith("segfault")
):
pytest.skip(f"known segfault: {reason}")
for reason in (
reason for [flag, reason] in info.items() if flag.startswith("xfail")
):
if request.config.option.run_xfail:
request.applymarker(
pytest.mark.xfail(
run=False,
reason=f"known failure: {reason}",
)
)
else:
pytest.xfail(f"known failure: {reason}")
return info
|
31,472 | def test_ip_command(mocker):
"""
Given:
- a list of ips
When:
- running ip command was required
Then:
- validates that indicator objects were created as expected
"""
# from Packs.Threat_Crowd.Integrations.ThreatCrowdV2.ThreatCrowd_v2 import get_ip
from ThreatCrowd_v2 import ip_command
mock_response = get_key_from_test_data('ip_response')
mocker.patch.object(Client, 'http_request', return_value=mock_response)
res = ip_command(CLIENT, {'ip': '0.0.0.0, 1.1.1.1'})
assert res[0].outputs['value'] == "0.0.0.0"
assert res[0].indicator.dbot_score.reliability == DBotScoreReliability.C
assert res[0].indicator.dbot_score.score == 3
assert res[0].indicator.dbot_score.indicator == "0.0.0.0"
assert len(res) == 2
| def test_ip_command(mocker):
"""
Given:
- a list of ips
When:
- running ip command was required
Then:
- validates that indicator objects were created with the proper reliability, score and values.
"""
# from Packs.Threat_Crowd.Integrations.ThreatCrowdV2.ThreatCrowd_v2 import get_ip
from ThreatCrowd_v2 import ip_command
mock_response = get_key_from_test_data('ip_response')
mocker.patch.object(Client, 'http_request', return_value=mock_response)
res = ip_command(CLIENT, {'ip': '0.0.0.0, 1.1.1.1'})
assert res[0].outputs['value'] == "0.0.0.0"
assert res[0].indicator.dbot_score.reliability == DBotScoreReliability.C
assert res[0].indicator.dbot_score.score == 3
assert res[0].indicator.dbot_score.indicator == "0.0.0.0"
assert len(res) == 2
|
46,346 | def _daal_type_of_target(y):
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], str)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) != 0 and
not isinstance(y.flat[0], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_daal_assert_all_finite(y)
return 'continuous' + suffix
unique = np.sort(pd.unique(y.ravel())) if pandas_is_imported else np.unique(y)
if (len(unique) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
result = 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
result = ('binary', unique) # [1, 2] or [["a"], ["b"]]
return result
| def _daal_type_of_target(y):
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], str)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) != 0 and
not isinstance(y.flat[0], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_daal_assert_all_finite(y)
return 'continuous' + suffix
unique = np.sort(pd.unique(y.ravel())) if pandas_is_imported else np.unique(y)
if (len(unique) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
result = ('multiclass' + suffix, None) # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
result = ('binary', unique) # [1, 2] or [["a"], ["b"]]
return result
|
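For orientation, `_daal_type_of_target` is modelled on scikit-learn's `type_of_target`; the upstream helper gives a feel for the label categories involved (it returns plain strings, whereas the wrapper above may also return a `(label, classes)` tuple):

```python
from sklearn.utils.multiclass import type_of_target  # upstream helper this mirrors

print(type_of_target([0, 1, 0, 1]))        # 'binary'
print(type_of_target([1, 2, 3]))           # 'multiclass'
print(type_of_target([0.1, 0.6, 0.3]))     # 'continuous'
print(type_of_target([[1, 2], [3, 1]]))    # 'multiclass-multioutput'
```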
43,856 | def sample(op=None, wires=None):
r"""Sample from the supplied observable, with the number of shots
determined from the ``dev.shots`` attribute of the corresponding device.
If no observable is provided then sample from the device specific raw samples.
The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable.
The probability of drawing eigenvalue :math:`\lambda_i` is given by
:math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle`
is the corresponding basis state from the observable's eigenbasis.
If no observable was provided then the raw samples obtained from device are returned
(eg. for a qubit device, samples from the computational device are returned). In this
case, `wires` can be specified so that sample results only include measurement results
of the qubits of interest.
**Example 1:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
array([ 1., 1., 1., -1.])
**Example 2:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample()
Executing this QNode:
>>> circuit(0.5)
array([[0, 1],
[0, 0],
[1, 1],
[0, 0]])
Args:
op (Observable or None): a quantum observable object
wires (Sequence[int] or int or None): the wires we wish to sample from
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
ValueError: Cannot set wires if an observable is provided
"""
if not isinstance(op, Observable) and op is not None: # None type is also allowed for op
raise qml.QuantumFunctionError(
"{} is not an observable: cannot be used with sample".format(op.name)
)
if wires is not None:
if op is not None:
raise ValueError("Cannot set the wires if an observable is provided.")
return MeasurementProcess(Sample, obs=op, wires=qml.wires.Wires(wires))
return MeasurementProcess(Sample, obs=op)
| def sample(op=None, wires=None):
r"""Sample from the supplied observable, with the number of shots
determined from the ``dev.shots`` attribute of the corresponding device.
If no observable is provided then sample from the device specific raw samples.
The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable.
The probability of drawing eigenvalue :math:`\lambda_i` is given by
:math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle`
is the corresponding basis state from the observable's eigenbasis.
**Example**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
array([ 1., 1., 1., -1.])
**Example 2:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample()
Executing this QNode:
>>> circuit(0.5)
array([[0, 1],
[0, 0],
[1, 1],
[0, 0]])
Args:
op (Observable or None): a quantum observable object
wires (Sequence[int] or int or None): the wires we wish to sample from
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
ValueError: Cannot set wires if an observable is provided
"""
if not isinstance(op, Observable) and op is not None: # None type is also allowed for op
raise qml.QuantumFunctionError(
"{} is not an observable: cannot be used with sample".format(op.name)
)
if wires is not None:
if op is not None:
raise ValueError("Cannot set the wires if an observable is provided.")
return MeasurementProcess(Sample, obs=op, wires=qml.wires.Wires(wires))
return MeasurementProcess(Sample, obs=op)
|
41,060 | def griffin_lim(spc, n_fft, n_shift, win_length, window='hann', n_iters=100):
"""Convert linear spectrogram into waveform using Griffin-Lim.
Args:
spc (ndarray): Linear spectrogram (T, n_fft // 2 + 1).
n_fft (int): Number of FFT points.
n_shift (int): Shift size in points.
win_length (int): Window length in points.
window (str, optional): Window function type.
n_iters (int, optional): Number of iterations of Griffin-Lim Algorithm.
Returns:
ndarray: Reconstructed waveform (N,).
"""
# assert the size of input linear spectrogram
assert spc.shape[1] == n_fft // 2 + 1
if LooseVersion(librosa.__version__) >= LooseVersion('0.7.0'):
# use librosa's fast Griffin-Lim algorithm
spc = np.abs(spc.T)
y = librosa.griffinlim(
S=spc,
n_iter=n_iters,
hop_length=n_shift,
win_length=win_length,
window=window
)
else:
# use slower version of Griffin-Lim algorithm
logging.warning("librosa version is old. use slow version of Griffin-Lim algorithm.")
logging.warning("if you wanto use fast Griffin-Lim, "
"please update via `source ./path.sh && pip install librosa>=0.7.0`.")
cspc = np.abs(spc).astype(np.complex).T
angles = np.exp(2j * np.pi * np.random.rand(*cspc.shape))
y = librosa.istft(cspc * angles, n_shift, win_length, window=window)
for i in range(n_iters):
angles = np.exp(1j * np.angle(librosa.stft(y, n_fft, n_shift, win_length, window=window)))
y = librosa.istft(cspc * angles, n_shift, win_length, window=window)
return y
| def griffin_lim(spc, n_fft, n_shift, win_length, window='hann', n_iters=100):
"""Convert linear spectrogram into waveform using Griffin-Lim.
Args:
spc (ndarray): Linear spectrogram (T, n_fft // 2 + 1).
n_fft (int): Number of FFT points.
n_shift (int): Shift size in points.
win_length (int): Window length in points.
window (str, optional): Window function type.
n_iters (int, optional): Number of iterations of Griffin-Lim Algorithm.
Returns:
ndarray: Reconstructed waveform (N,).
"""
# assert the size of input linear spectrogram
assert spc.shape[1] == n_fft // 2 + 1
if LooseVersion(librosa.__version__) >= LooseVersion('0.7.0'):
        # use librosa's fast Griffin-Lim algorithm
spc = np.abs(spc.T)
y = librosa.griffinlim(
S=spc,
n_iter=n_iters,
hop_length=n_shift,
win_length=win_length,
window=window
)
else:
        # use slower version of Griffin-Lim algorithm
        logging.warning("librosa version is old. use slow version of Griffin-Lim algorithm.")
logging.warning("if you want to use fast Griffin-Lim, "
"please update via `source ./path.sh && pip install librosa>=0.7.0`.")
        cspc = np.abs(spc).astype(np.complex128).T
angles = np.exp(2j * np.pi * np.random.rand(*cspc.shape))
y = librosa.istft(cspc * angles, n_shift, win_length, window=window)
for i in range(n_iters):
angles = np.exp(1j * np.angle(librosa.stft(y, n_fft, n_shift, win_length, window=window)))
y = librosa.istft(cspc * angles, n_shift, win_length, window=window)
return y
|
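A minimal usage sketch for the griffin_lim helper above, assuming numpy and librosa are importable as in the snippet; the spectrogram is a random toy array and the STFT parameters are arbitrary illustrative choices, not requirements of the helper.
# Hedged sketch: feed griffin_lim a toy (T, n_fft // 2 + 1) magnitude spectrogram.
import numpy as np

n_fft, n_shift, win_length = 1024, 256, 1024   # illustrative STFT parameters
spc = np.abs(np.random.randn(200, n_fft // 2 + 1)).astype(np.float32)  # toy magnitudes
wav = griffin_lim(spc, n_fft=n_fft, n_shift=n_shift, win_length=win_length,
                  window='hann', n_iters=32)
print(wav.shape)  # 1-D reconstructed waveform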
44,203 | def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
        T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
| def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
        T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, and eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
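A small sanity check of the factorization identity V_{ijkl} = sum_r L_{ij}^{(r)} L_{kl}^{(r)}, using a rank-one, positive semi-definite toy tensor built from a symmetric matrix; the tensor, its size, and the tolerance are illustrative assumptions only (a real two-electron tensor would come from qml.qchem as in the docstring example).
# Hedged sketch: reconstruct a toy two-electron-like tensor from the returned factors.
import numpy as np

n = 2
rng = np.random.default_rng(0)
S = rng.normal(size=(n, n))
S = (S + S.T) / 2                       # symmetric n x n building block
two = np.einsum('ij,kl->ijkl', S, S)    # rank-1, PSD tensor in chemist-style layout
factors, eigvals, eigvecs = factorize(two, tol=1e-8)
approx = np.einsum('rij,rkl->ijkl', factors, factors)
print(np.allclose(approx, two, atol=1e-6))      # expected: True
print(np.allclose(factors[0], factors[0].T))    # the factor is a symmetric matrix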
20,296 | def detect_cpu(compilers: CompilersDict):
if mesonlib.is_windows():
trial = detect_windows_arch(compilers)
elif mesonlib.is_freebsd() or mesonlib.is_netbsd() or mesonlib.is_openbsd() or mesonlib.is_aix():
trial = platform.processor().lower()
else:
trial = platform.machine().lower()
if trial in ('amd64', 'x64', 'i86pc'):
trial = 'x86_64'
if trial == 'x86_64':
# Same check as above for cpu_family
if any_compiler_has_define(compilers, '__i386__'):
trial = 'i686' # All 64 bit cpus have at least this level of x86 support.
elif trial == 'aarch64':
# Same check as above for cpu_family
if any_compiler_has_define(compilers, '__arm__'):
trial = 'arm'
elif trial.startswith('earm'):
trial = 'arm'
elif trial == 'e2k':
# Make more precise CPU detection for Elbrus platform.
trial = platform.processor().lower()
elif trial.startswith('mips'):
if not '64' in trial:
trial = 'mips'
else:
trial = 'mips64'
# Add more quirks here as bugs are reported. Keep in sync with
# detect_cpu_family() above.
return trial
| def detect_cpu(compilers: CompilersDict):
if mesonlib.is_windows():
trial = detect_windows_arch(compilers)
elif mesonlib.is_freebsd() or mesonlib.is_netbsd() or mesonlib.is_openbsd() or mesonlib.is_aix():
trial = platform.processor().lower()
else:
trial = platform.machine().lower()
if trial in ('amd64', 'x64', 'i86pc'):
trial = 'x86_64'
if trial == 'x86_64':
# Same check as above for cpu_family
if any_compiler_has_define(compilers, '__i386__'):
trial = 'i686' # All 64 bit cpus have at least this level of x86 support.
elif trial == 'aarch64':
# Same check as above for cpu_family
if any_compiler_has_define(compilers, '__arm__'):
trial = 'arm'
elif trial.startswith('earm'):
trial = 'arm'
elif trial == 'e2k':
# Make more precise CPU detection for Elbrus platform.
trial = platform.processor().lower()
elif trial.startswith('mips'):
if '64' not in trial:
trial = 'mips'
else:
trial = 'mips64'
# Add more quirks here as bugs are reported. Keep in sync with
# detect_cpu_family() above.
return trial
|
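The meson-specific helpers used above (detect_windows_arch, any_compiler_has_define, mesonlib) are not shown in this excerpt; as a rough, standalone illustration of just the alias-normalization step, under the assumption that plain platform.machine() is the starting point:
# Hedged sketch: normalize a few common machine-name aliases, mirroring the first branch above.
import platform

raw = platform.machine().lower()
aliases = {'amd64': 'x86_64', 'x64': 'x86_64', 'i86pc': 'x86_64'}
print(aliases.get(raw, raw))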
31,537 | def ip_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns IP's reputation
"""
ips = argToList(args.get('ip'))
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for ip in ips:
if not is_ip_valid(ip, accept_v6_ips=True): # check IP's validity
raise ValueError(f'IP "{ip}" is not valid')
try:
raw_response = client.ip(ip)
except Exception as exception:
# If anything happens, just keep going
demisto.debug(f'Could not process IP: "{ip}"\n {str(exception)}')
continue
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
num_of_engines, num_of_positive_engines = calculate_engines(reputation_data=data)
for data_entry in data:
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=data_entry.get('description')
)
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for IP: {ip}:', data_entry)
ip_indicator = Common.IP(
ip=ip,
dbot_score=dbot_score,
detection_engines=num_of_engines,
positive_engines=num_of_positive_engines
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.IP',
outputs_key_field='id',
outputs=data_entry,
indicator=ip_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
else: # no data
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about IP: {ip} \n'
ip_indicator = Common.IP(
ip=ip,
dbot_score=dbot_score,
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.IP',
outputs_key_field='id',
outputs=data,
indicator=ip_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results
| def ip_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns IP's reputation
"""
ips = argToList(args.get('ip'))
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for ip in ips:
if not is_ip_valid(ip, accept_v6_ips=True): # check IP's validity
raise ValueError(f'IP "{ip}" is not valid')
try:
raw_response = client.ip(ip)
except Exception as exception:
# If anything happens, just keep going
demisto.debug(f'Could not process IP: "{ip}"\n {str(exception)}')
continue
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
num_of_engines, num_of_positive_engines = calculate_engines(reputation_data=data)
for data_entry in data:
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=data_entry.get('description')
)
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for IP {ip}', data_entry)
ip_indicator = Common.IP(
ip=ip,
dbot_score=dbot_score,
detection_engines=num_of_engines,
positive_engines=num_of_positive_engines
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.IP',
outputs_key_field='id',
outputs=data_entry,
indicator=ip_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
else: # no data
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about IP: {ip} \n'
ip_indicator = Common.IP(
ip=ip,
dbot_score=dbot_score,
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.IP',
outputs_key_field='id',
outputs=data,
indicator=ip_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results
|
41,911 | def custom_cv_fun(
lgbm_params,
X,
y,
objective="binary",
eval_metric="auc",
eval_name="valid",
n_estimators=100,
early_stopping_rounds=10,
nfold=5,
random_state=123,
callbacks=[],
verbose=False,
):
# create placeholders for results
fold_best_iterations = []
fold_best_scores = []
# get feature names
feature_names = list(X.columns)
# split data into k folds
kf = KFold(n_splits=nfold, shuffle=True, random_state=random_state)
splits = kf.split(X, y)
# iterate over folds
for n, (train_index, valid_index) in enumerate(splits):
# subset train and valid (out-of-fold) parts of the fold
X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
# select estimator for the current objective
lgbm_cv_fun = None
if objective in ["binary", "classification"]:
lgbm_cv_fun = LGBMClassifier
elif objective == "regression":
lgbm_cv_fun = LGBMRegressor
model = lgbm_cv_fun(n_estimators=n_estimators, random_state=random_state)
# pass hyperparameters dict to the estimator
model.set_params(**lgbm_params)
# train the model
model.fit(
X_train,
y_train.values.ravel(),
eval_set=(X_valid, y_valid.values.ravel()),
eval_metric=[eval_metric], # note: a list required
eval_names=[eval_name], # note: a list required
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
feature_name=feature_names,
callbacks=callbacks,
)
# collect current fold data
fold_best_iterations.append(model.best_iteration_)
fold_best_scores.append(model.best_score_[eval_name])
# average folds iterations numbers
folds_data = {}
folds_data["best_iterations_mean"] = -1
if fold_best_iterations[0] is not None:
folds_data["best_iterations_mean"] = int(np.mean(fold_best_iterations))
# collect metrics for best scores in each fold
fold_best_score = {}
for metric in fold_best_scores[0].keys():
fold_best_score[metric] = [fold[metric] for fold in fold_best_scores]
    # average folds metrics (for all metrics)
for metric in fold_best_scores[0].keys():
folds_data["eval_mean-" + metric] = np.mean(fold_best_score[metric])
return {
"folds_mean_data": folds_data,
"feature_names": feature_names,
"fold_best_iter": fold_best_iterations,
"fold_best_score": fold_best_score,
}
| def custom_cv_fun(
lgbm_params,
X,
y,
objective="binary",
eval_metric="auc",
eval_name="valid",
n_estimators=100,
early_stopping_rounds=10,
nfold=5,
random_state=123,
callbacks=[],
verbose=False,
):
# create placeholders for results
fold_best_iterations = []
fold_best_scores = []
# get feature names
feature_names = list(X.columns)
# split data into k folds
kf = KFold(n_splits=nfold, shuffle=True, random_state=random_state)
splits = kf.split(X, y)
# iterate over folds
for train_index, valid_index in splits:
# subset train and valid (out-of-fold) parts of the fold
X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
# select estimator for the current objective
lgbm_cv_fun = None
if objective in ["binary", "classification"]:
lgbm_cv_fun = LGBMClassifier
elif objective == "regression":
lgbm_cv_fun = LGBMRegressor
model = lgbm_cv_fun(n_estimators=n_estimators, random_state=random_state)
# pass hyperparameters dict to the estimator
model.set_params(**lgbm_params)
# train the model
model.fit(
X_train,
y_train.values.ravel(),
eval_set=(X_valid, y_valid.values.ravel()),
eval_metric=[eval_metric], # note: a list required
eval_names=[eval_name], # note: a list required
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
feature_name=feature_names,
callbacks=callbacks,
)
# collect current fold data
fold_best_iterations.append(model.best_iteration_)
fold_best_scores.append(model.best_score_[eval_name])
# average folds iterations numbers
folds_data = {}
folds_data["best_iterations_mean"] = -1
if fold_best_iterations[0] is not None:
folds_data["best_iterations_mean"] = int(np.mean(fold_best_iterations))
# collect metrics for best scores in each fold
fold_best_score = {}
for metric in fold_best_scores[0].keys():
fold_best_score[metric] = [fold[metric] for fold in fold_best_scores]
    # average folds metrics (for all metrics)
for metric in fold_best_scores[0].keys():
folds_data["eval_mean-" + metric] = np.mean(fold_best_score[metric])
return {
"folds_mean_data": folds_data,
"feature_names": feature_names,
"fold_best_iter": fold_best_iterations,
"fold_best_score": fold_best_score,
}
|
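A hedged invocation sketch for custom_cv_fun: the synthetic dataset, hyperparameters and fold count are illustrative assumptions, and it presumes a lightgbm version whose LGBMClassifier.fit still accepts the early_stopping_rounds/verbose keyword arguments used above (newer releases moved these to callbacks).
# Hedged sketch: run the CV helper on a small synthetic binary-classification frame.
import pandas as pd
from sklearn.datasets import make_classification

X_arr, y_arr = make_classification(n_samples=300, n_features=6, random_state=123)
X = pd.DataFrame(X_arr, columns=[f"feat_{i}" for i in range(6)])
y = pd.Series(y_arr, name="target")
cv_out = custom_cv_fun(
    {"learning_rate": 0.1, "num_leaves": 15},   # illustrative hyperparameters
    X, y,
    objective="binary", eval_metric="auc",
    n_estimators=50, nfold=3, random_state=123,
)
print(cv_out["folds_mean_data"])   # mean best iteration and mean per-metric eval scores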
31,765 | def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
headers['PRIVATE-TOKEN'] = f'{params["api_key"]}'
command = demisto.command()
LOG(f'Command being called is {command}')
try:
urllib3.disable_warnings()
client = Client(urljoin(url, ""), verify_certificate, proxy, headers=headers)
commands = {
'gitlab-get-projects': get_projects_command,
'gitlab-projects-get-access-requests': projects_get_access_requests_command,
'gitlab-projects-request-access': projects_request_access_command,
'gitlab-projects-approve-access': projects_approve_access_command,
'gitlab-projects-deny-access': projects_deny_access_command,
'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
'gitlab-get-version': get_version_command,
'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
'gitlab-pipelines-list': gitlab_pipelines_list_command,
'gitlab-jobs-list': gitlab_jobs_list_command,
'gitlab-artifact-get': gitlab_artifact_get_command,
'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
'gitlab-get-merge-request': gitlab_get_merge_request_command,
'gitlab-issues-list': gitlab_issues_list_command,
'gitlab-create-issue': gitlab_create_issue_command,
'gitlab-edit-issue': gitlab_edit_issue_command,
'gitlab-group-projects-list': gitlab_group_projects_list_command,
'gitlab-get-raw-file': gitlab_get_raw_file_command
}
if command == 'test-module':
test_module(client)
else:
return_results(commands[command](client, args))
except Exception as e:
return_error(str(e))
| def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
headers['PRIVATE-TOKEN'] = f'{params["api_key"]}'
command = demisto.command()
LOG(f'Command being called is {command}')
try:
urllib3.disable_warnings()
client = Client(urljoin(url, ""), verify_certificate, proxy, headers=headers)
commands = {
'gitlab-get-projects': get_projects_command,
'gitlab-projects-get-access-requests': projects_get_access_requests_command,
'gitlab-projects-request-access': projects_request_access_command,
'gitlab-projects-approve-access': projects_approve_access_command,
'gitlab-projects-deny-access': projects_deny_access_command,
'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
'gitlab-get-version': get_version_command,
'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
'gitlab-pipelines-list': gitlab_pipelines_list_command,
'gitlab-jobs-list': gitlab_jobs_list_command,
'gitlab-artifact-get': gitlab_artifact_get_command,
'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
'gitlab-merge-request-get': gitlab_get_merge_request_command,
'gitlab-issues-list': gitlab_issues_list_command,
'gitlab-create-issue': gitlab_create_issue_command,
'gitlab-edit-issue': gitlab_edit_issue_command,
'gitlab-group-projects-list': gitlab_group_projects_list_command,
'gitlab-get-raw-file': gitlab_get_raw_file_command
}
if command == 'test-module':
test_module(client)
else:
return_results(commands[command](client, args))
except Exception as e:
return_error(str(e))
|
58,180 | def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
secret_key = demisto.params().get('credentials').get('password')
client_key = demisto.params().get('credentials').get('identifier')
organisation_id = demisto.params().get('organization_id')
# get the service API url
base_url = demisto.params().get("api_url")
# How much time before the first fetch to retrieve incidents
proxy = demisto.params().get('proxy', False)
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
organisation_id=organisation_id,
secret_key=secret_key,
client_key=client_key,
proxy=proxy)
commands = {
'test-module': test_module,
"umbrella-reporting-destination-list":
get_destinations_list,
"umbrella-reporting-category-list":
get_categories_list,
"umbrella-reporting-identity-list":
get_identities_list,
"umbrella-reporting-event-type-list":
get_event_types_list,
"umbrella-reporting-file-list":
get_file_list,
"umbrella-reporting-threat-list":
get_threat_list,
"umbrella-reporting-activity-list":
get_activity_list,
"umbrella-reporting-activity-get":
get_activity_by_traffic_type,
"umbrella-reporting-summary-list":
get_summary_list
}
args = demisto.args()
command = demisto.command()
if command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError
# Log exceptions
except Exception as e:
return_error(
f'Failed to execute {demisto.command()} command. Error: {str(e)}')
| def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
args = demisto.args()
command = demisto.command()
params = demisto.params()
secret_key = params.get('credentials', {}).get('password')
client_key = params.get('credentials', {}).get('identifier')
organisation_id = params.get('organization_id')
# get the service API url
base_url = demisto.params().get("api_url")
# How much time before the first fetch to retrieve incidents
proxy = demisto.params().get('proxy', False)
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
organisation_id=organisation_id,
secret_key=secret_key,
client_key=client_key,
proxy=proxy)
commands = {
'test-module': test_module,
"umbrella-reporting-destination-list":
get_destinations_list,
"umbrella-reporting-category-list":
get_categories_list,
"umbrella-reporting-identity-list":
get_identities_list,
"umbrella-reporting-event-type-list":
get_event_types_list,
"umbrella-reporting-file-list":
get_file_list,
"umbrella-reporting-threat-list":
get_threat_list,
"umbrella-reporting-activity-list":
get_activity_list,
"umbrella-reporting-activity-get":
get_activity_by_traffic_type,
"umbrella-reporting-summary-list":
get_summary_list
}
args = demisto.args()
command = demisto.command()
if command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError
# Log exceptions
except Exception as e:
return_error(
f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
47,881 | def try_download(reporter, file, num_attempts, start_download, size):
for attempt in range(num_attempts):
if attempt != 0:
retry_delay = 10
reporter.print("Will retry in {} seconds...", retry_delay, flush=True)
time.sleep(retry_delay)
try:
chunk_iterable = start_download()
file.seek(0)
file.truncate()
actual_size = process_download(reporter, chunk_iterable, size, file)
if actual_size > size:
reporter.log_error("Remote file is longer than expected ({} B)", size)
# no sense in retrying - if the file is longer, there's no way it'll fix itself
return False
elif actual_size < size:
reporter.log_error("Downloaded file is shorter ({} B) than expected ({} B)",
actual_size, size)
# it's possible that we got disconnected before receiving the full file,
# so try again
else:
return True
except (requests.exceptions.RequestException, ssl.SSLError):
reporter.log_error("Download failed", exc_info=True)
return False
| def try_download(reporter, file, num_attempts, start_download, size):
for attempt in range(num_attempts):
if attempt != 0:
retry_delay = 10
reporter.print("Will retry in {} seconds...", retry_delay, flush=True)
time.sleep(retry_delay)
try:
chunk_iterable = start_download()
file.seek(0)
file.truncate()
actual_size = process_download(reporter, chunk_iterable, size, file)
if actual_size > size:
reporter.log_error("Remote file is longer than expected ({} B), download aborted", size)
# no sense in retrying - if the file is longer, there's no way it'll fix itself
return False
elif actual_size < size:
reporter.log_error("Downloaded file is shorter ({} B) than expected ({} B)",
actual_size, size)
# it's possible that we got disconnected before receiving the full file,
# so try again
else:
return True
except (requests.exceptions.RequestException, ssl.SSLError):
reporter.log_error("Download failed", exc_info=True)
return False
|
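A hedged driver sketch for try_download; the reporter stub, URL and expected size are placeholders, and process_download is assumed to be defined in the same module (it is called above but not shown in this excerpt).
# Hedged sketch: minimal reporter stub plus a requests-based start_download callable.
import tempfile
import requests

class StubReporter:
    def print(self, fmt, *args, flush=False):
        print(fmt.format(*args))
    def log_error(self, fmt, *args, exc_info=False):
        print("ERROR:", fmt.format(*args))

url = "https://example.com/file.bin"   # placeholder URL
expected_size = 4096                   # placeholder size in bytes
with tempfile.TemporaryFile() as f:
    ok = try_download(
        StubReporter(), f, num_attempts=3,
        start_download=lambda: requests.get(url, stream=True, timeout=30).iter_content(chunk_size=8192),
        size=expected_size,
    )
    print("download ok:", ok)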
34,517 | def test_binary_featurizer_correctly_encodes_state():
"""
    Check that all the attributes are correctly featurized when they should be, and not featurized when they shouldn't be.
"""
f = BinarySingleStateFeaturizer()
f._default_feature_states[INTENT] = {"a": 0, "b": 1}
f._default_feature_states[ACTION_NAME] = {"c": 0, "d": 1, "action_listen": 2}
f._default_feature_states[SLOTS] = {"e_0": 0, "f_0": 1, "g_0": 2}
f._default_feature_states[ACTIVE_LOOP] = {"h": 0, "i": 1, "j": 2, "k": 3}
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "d"},
"active_loop": {"name": "i"},
"slots": {"g": (1.0,)},
},
interpreter=None,
)
# user input is ignored as prev action is not action_listen;
assert list(encoded.keys()) == [ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 1, 0, 0]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])).nnz == 0
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "action_listen"},
"active_loop": {"name": "k"},
"slots": {"e": (1.0,)},
},
interpreter=None,
)
assert list(encoded.keys()) == [INTENT, ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[1, 0]])).nnz == 0
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0
| def test_single_state_featurizer_correctly_encodes_state():
"""
    Check that all the attributes are correctly featurized when they should be, and not featurized when they shouldn't be.
"""
f = BinarySingleStateFeaturizer()
f._default_feature_states[INTENT] = {"a": 0, "b": 1}
f._default_feature_states[ACTION_NAME] = {"c": 0, "d": 1, "action_listen": 2}
f._default_feature_states[SLOTS] = {"e_0": 0, "f_0": 1, "g_0": 2}
f._default_feature_states[ACTIVE_LOOP] = {"h": 0, "i": 1, "j": 2, "k": 3}
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "d"},
"active_loop": {"name": "i"},
"slots": {"g": (1.0,)},
},
interpreter=None,
)
# user input is ignored as prev action is not action_listen;
assert list(encoded.keys()) == [ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 1, 0, 0]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])).nnz == 0
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "action_listen"},
"active_loop": {"name": "k"},
"slots": {"e": (1.0,)},
},
interpreter=None,
)
assert list(encoded.keys()) == [INTENT, ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[1, 0]])).nnz == 0
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0
|
2,727 | def generate_valid_param(constraint):
"""Return a value that does satisfy a constraint.
    This is only useful for testing purposes.
Parameters
----------
constraint : Constraint
The constraint to generate a value for.
Returns
-------
val : object
A value that does satisfy the constraint.
"""
if isinstance(constraint, _ArrayLikes):
return np.array([1, 2, 3])
elif isinstance(constraint, _SparseMatrices):
return csr_matrix([[0, 1], [1, 0]])
elif isinstance(constraint, _RandomStates):
return np.random.RandomState(42)
elif isinstance(constraint, _Callables):
return lambda x: x
elif isinstance(constraint, _NoneConstraint):
return None
elif isinstance(constraint, _InstancesOf):
return constraint.type()
if isinstance(constraint, StrOptions):
for option in constraint.options:
return option
elif isinstance(constraint, Interval):
interval = constraint
if interval.type is Real:
return (interval.left + interval.right) / 2
else:
if interval.closed in ("left", "both"):
return interval.left
else:
return interval.left + 1
| def generate_valid_param(constraint):
"""Return a value that does satisfy a constraint.
    This is only useful for testing purposes.
Parameters
----------
constraint : Constraint instance
The constraint to generate a value for.
Returns
-------
val : object
A value that does satisfy the constraint.
"""
if isinstance(constraint, _ArrayLikes):
return np.array([1, 2, 3])
elif isinstance(constraint, _SparseMatrices):
return csr_matrix([[0, 1], [1, 0]])
elif isinstance(constraint, _RandomStates):
return np.random.RandomState(42)
elif isinstance(constraint, _Callables):
return lambda x: x
elif isinstance(constraint, _NoneConstraint):
return None
elif isinstance(constraint, _InstancesOf):
return constraint.type()
if isinstance(constraint, StrOptions):
for option in constraint.options:
return option
elif isinstance(constraint, Interval):
interval = constraint
if interval.type is Real:
return (interval.left + interval.right) / 2
else:
if interval.closed in ("left", "both"):
return interval.left
else:
return interval.left + 1
|
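A hedged usage sketch, assuming the constraint classes come from scikit-learn's private sklearn.utils._param_validation module; that import path and the exact constructor signatures are version-dependent assumptions.
# Hedged sketch: generate satisfying values for two constraint types.
from numbers import Real
from sklearn.utils._param_validation import Interval, StrOptions  # private, version-dependent API

print(generate_valid_param(Interval(Real, 0, 1, closed="both")))   # midpoint of the interval -> 0.5
print(generate_valid_param(StrOptions({"auto", "manual"})))        # one of the allowed strings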
55,117 | def _reconstruct_equ(fun, num_frequency, fun_at_zero=None):
r"""Reconstruct a univariate Fourier series with consecutive integer
frequencies, using trigonometric interpolation and equidistant shifts.
This technique is based on
`Dirichlet kernels <https://en.wikipedia.org/wiki/Dirichlet_kernel>`_, see
`Vidal and Theis (2018) <https://arxiv.org/abs/1812.06323>`_ or
`Wierichs et al. (2021) <https://arxiv.org/abs/2107.12390>`_.
Args:
fun (callable): Function to reconstruct
num_frequency (int): Number of integer frequencies in ``fun``
fun_at_zero (float): Value of ``fun`` at zero; Providing ``fun_at_zero`` saves one
evaluation of ``fun``
Returns:
callable: Reconstructed Fourier series with ``num_frequency`` frequencies,
as ``qml.numpy`` based function.
"""
if not abs(int(num_frequency)) == num_frequency:
raise ValueError(f"num_frequency must be a non-negative integer, got {num_frequency}")
a, b = (num_frequency + 0.5) / np.pi, 0.5 / np.pi
shifts_pos = qml.math.arange(1, num_frequency + 1) / a
shifts_neg = -shifts_pos[::-1]
fun_at_zero = fun(qml.math.array(0.0)) if fun_at_zero is None else fun_at_zero
evals = list(map(fun, shifts_neg)) + [fun_at_zero] + list(map(fun, shifts_pos))
shifts = qml.math.concatenate([shifts_neg, [0.0], shifts_pos])
def _reconstruction(x):
"""Univariate reconstruction based on equidistant shifts and Dirichlet kernels."""
return qml.math.tensordot(
qml.math.sinc(a * (x - shifts)) / qml.math.sinc(b * (x - shifts)),
evals,
axes=[[0], [0]],
)
return _reconstruction
| def _reconstruct_equ(fun, num_frequency, fun_at_zero=None):
r"""Reconstruct a univariate Fourier series with consecutive integer
frequencies, using trigonometric interpolation and equidistant shifts.
This technique is based on
`Dirichlet kernels <https://en.wikipedia.org/wiki/Dirichlet_kernel>`_, see
`Vidal and Theis (2018) <https://arxiv.org/abs/1812.06323>`_ or
`Wierichs et al. (2021) <https://arxiv.org/abs/2107.12390>`_.
Args:
fun (callable): Function to reconstruct
num_frequency (int): Number of integer frequencies in ``fun``
fun_at_zero (float): Value of ``fun`` at zero; Providing ``fun_at_zero`` saves one
evaluation of ``fun``
Returns:
callable: Reconstructed Fourier series with ``num_frequency`` frequencies,
as ``qml.numpy`` based function.
"""
if not abs(int(num_frequency)) == num_frequency:
raise ValueError(f"num_frequency must be a non-negative integer, got {num_frequency}")
a = (num_frequency + 0.5) / np.pi
b = 0.5 / np.pi
shifts_pos = qml.math.arange(1, num_frequency + 1) / a
shifts_neg = -shifts_pos[::-1]
fun_at_zero = fun(qml.math.array(0.0)) if fun_at_zero is None else fun_at_zero
evals = list(map(fun, shifts_neg)) + [fun_at_zero] + list(map(fun, shifts_pos))
shifts = qml.math.concatenate([shifts_neg, [0.0], shifts_pos])
def _reconstruction(x):
"""Univariate reconstruction based on equidistant shifts and Dirichlet kernels."""
return qml.math.tensordot(
qml.math.sinc(a * (x - shifts)) / qml.math.sinc(b * (x - shifts)),
evals,
axes=[[0], [0]],
)
return _reconstruction
|
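A small worked check of the reconstruction, assuming PennyLane is importable (so that qml.math and the module-level np used above are available); the toy Fourier series and test point are arbitrary.
# Hedged sketch: reconstruct a series with integer frequencies {1, 2} from 2*2 + 1 = 5 shifts.
import numpy as np

def toy_fun(x):
    return 0.1 + 0.4 * np.cos(x) + 0.2 * np.sin(2 * x)

recon = _reconstruct_equ(toy_fun, num_frequency=2)
x_test = 0.73
print(np.isclose(toy_fun(x_test), recon(x_test)))   # expected: True (trig interpolation is exact here)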
30,894 | def fetch_indicators_command(client, indicator_type: list, max_fetch: int = None) -> list:
""" fetch indicators from the OpenCTI
Args:
client: OpenCTI Client object
indicator_type(list): List of indicators types to get.
max_fetch: int max indicators to fetch
Returns:
list of indicators(list)
"""
last_run_id = demisto.getIntegrationContext().get('last_run_id')
new_last_run, indicators_list = get_indicators(client, indicator_type, last_run_id, max_fetch)
if new_last_run:
demisto.setIntegrationContext({'last_run_id': new_last_run})
return indicators_list
| def fetch_indicators_command(client, indicator_type: list, max_fetch: int = None) -> list:
""" fetch indicators from the OpenCTI
Args:
client: OpenCTI Client object
indicator_type(list): List of indicators types to get.
max_fetch: int max indicators to fetch
Returns:
list of indicators(list)
"""
last_run_id = demisto.getIntegrationContext().get('last_run_id')
new_last_run, indicators_list = get_indicators(client, indicator_type, last_run_id, max_fetch=max_fetch)
if new_last_run:
demisto.setIntegrationContext({'last_run_id': new_last_run})
return indicators_list
|
30,228 | def _build_arg_parser():
p = argparse.ArgumentParser(description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument("-d", "--dicom_dir",
required=True, nargs="+",
help="DICOM files directory.")
p.add_argument("-o", "--output_dir",
required=False, default=DEFAULT.cliOutputDir,
help="Output BIDS directory."
" (Default: %(default)s)")
p.add_argument('-f',
dest='overwrite', action='store_true',
help='Force overwriting of the output files.')
return p
| def _build_arg_parser():
p = argparse.ArgumentParser(description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument("-d", "--dicom_dir",
required=True, nargs="+",
help="DICOM files directory.")
p.add_argument("-o", "--output_dir",
required=False, default=DEFAULT.cliOutputDir,
help="Output BIDS directory."
" (Default: %(default)s)")
p.add_argument('-f',
dest='overwrite', action='store_true',
help='Force command to overwrite existing output files.')
return p
|
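A hedged invocation sketch for the parser above; the paths are placeholders, and DEFAULT.cliOutputDir/EPILOG are assumed to be defined in the surrounding module.
# Hedged sketch: build the parser and parse a hypothetical command line.
parser = _build_arg_parser()
args = parser.parse_args(["-d", "/path/to/dicoms", "-o", "bids_out", "-f"])
print(args.dicom_dir, args.output_dir, args.overwrite)   # ['/path/to/dicoms'] bids_out True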
49,978 | def parse_output(x) -> bytes:
try:
address = parse_address(x)
return bfh(address_to_script(address))
except Exception:
pass
try:
script = parse_script(x)
return bfh(script)
except Exception:
pass
raise Exception("Invalid address or script.")
| def parse_output(x) -> bytes:
if is_address(x):
        return bfh(address_to_script(x))
try:
script = parse_script(x)
return bfh(script)
except Exception:
pass
raise Exception("Invalid address or script.")
|
45,730 | def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {vil.shape[1]}x{vil.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"FFT: {fft_method}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the ARI(p,1) model: {ar_order}")
if type(ar_window_radius) == int:
print(f"ARI(p,1) window radius: {ar_window_radius}")
else:
print("ARI(p,1) window radius: none")
print(f"R(VIL) window radius: {r_vil_window_radius}")
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
else:
rainrate_mask = None
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
else:
r_vil_a, r_vil_b = None, None
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(vil)) else False
)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
rainrate_f = []
extrap_kwargs["return_displacement"] = True
state = {"vil_dec": vil_dec}
params = {
"apply_rainrate_mask": apply_rainrate_mask,
"mask": mask,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"rainrate": rainrate,
"rainrate_mask": rainrate_mask,
"recomp_method": recomp_method,
"r_vil_a": r_vil_a,
"r_vil_b": r_vil_b,
}
rainrate_f = nowcast_main_loop(
vil[-1, :],
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
rainrate_f, mainloop_time = rainrate_f
if measure_time:
return np.stack(rainrate_f), init_time, mainloop_time
else:
return np.stack(rainrate_f)
| def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {vil.shape[1]}x{vil.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"FFT: {fft_method}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the ARI(p,1) model: {ar_order}")
if type(ar_window_radius) == int:
print(f"ARI(p,1) window radius: {ar_window_radius}")
else:
print("ARI(p,1) window radius: none")
print(f"R(VIL) window radius: {r_vil_window_radius}")
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
else:
rainrate_mask = None
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
else:
        r_vil_a, r_vil_b = None, None
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(vil)) else False
)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
rainrate_f = []
extrap_kwargs["return_displacement"] = True
state = {"vil_dec": vil_dec}
params = {
"apply_rainrate_mask": apply_rainrate_mask,
"mask": mask,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"rainrate": rainrate,
"rainrate_mask": rainrate_mask,
"recomp_method": recomp_method,
"r_vil_a": r_vil_a,
"r_vil_b": r_vil_b,
}
rainrate_f = nowcast_main_loop(
vil[-1, :],
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
rainrate_f, mainloop_time = rainrate_f
if measure_time:
return np.stack(rainrate_f), init_time, mainloop_time
else:
return np.stack(rainrate_f)
|
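A hedged smoke-test sketch for the ANVIL forecast above, using synthetic fields sized only to satisfy the documented shape requirements (random non-negative "VIL" and a zero advection field); real inputs would normally come from pysteps' io and motion modules.
# Hedged sketch: synthetic inputs with shapes (ar_order + 2, m, n) and (2, m, n).
import numpy as np

m_dim = n_dim = 128
vil_fields = np.abs(np.random.randn(4, m_dim, n_dim))   # ar_order=2 -> 4 input fields
motion = np.zeros((2, m_dim, n_dim))                    # zero advection keeps the sketch trivial
nowcast = forecast(vil_fields, motion, timesteps=3, fft_method="numpy")
print(nowcast.shape)                                    # expected: (3, 128, 128)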
8,788 | def botfactory(settings, autoloads=None):
"""Build a test instance of Sopel, with a :class:`MockIRCBackend`.
:param settings: Sopel's configuration for testing purposes
:type settings: :class:`sopel.config.Config`
:param list autoloads: list of plugins to autoload after creating the bot
:return: a test instance of the bot
:rtype: :class:`sopel.bot.Sopel`
    This will instantiate a :class:`~sopel.bot.Sopel` object, replace its
backend with a :class:`~MockIRCBackend`, and then autoload plugins. This
will automatically load the ``coretasks`` plugins, and every other plugins
from ``autoloads``::
bot = botfactory(settings, ['emoticons', 'find', 'remind'])
"""
autoloads = set(autoloads or []) | {'coretasks'}
mockbot = Sopel(settings)
mockbot.backend = MockIRCBackend(mockbot)
usable_plugins = sopel.plugins.get_usable_plugins(settings)
for name in autoloads:
plugin = usable_plugins[name][0]
plugin.load()
plugin.register(mockbot)
return mockbot
| def botfactory(settings, autoloads=None):
"""Build a test instance of Sopel, with a :class:`MockIRCBackend`.
:param settings: Sopel's configuration for testing purposes
:type settings: :class:`sopel.config.Config`
:param list autoloads: list of plugins to autoload after creating the bot
:return: a test instance of the bot
:rtype: :class:`sopel.bot.Sopel`
    This will instantiate a :class:`~sopel.bot.Sopel` object, replace its
backend with a :class:`~MockIRCBackend`, and then autoload plugins. This
will automatically load the ``coretasks`` plugin, and every other plugin
from ``autoloads``::
bot = botfactory(settings, ['emoticons', 'find', 'remind'])
"""
autoloads = set(autoloads or []) | {'coretasks'}
mockbot = Sopel(settings)
mockbot.backend = MockIRCBackend(mockbot)
usable_plugins = sopel.plugins.get_usable_plugins(settings)
for name in autoloads:
plugin = usable_plugins[name][0]
plugin.load()
plugin.register(mockbot)
return mockbot
|
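A hedged usage sketch for botfactory; it assumes ``settings`` is an already prepared sopel.config.Config (e.g. from a test fixture), which is an assumption rather than part of the code shown.

# settings = ...  # a sopel.config.Config prepared elsewhere (assumed)
bot = botfactory(settings)                      # autoloads only 'coretasks'
assert isinstance(bot.backend, MockIRCBackend)  # the real IRC backend was swapped out
bot = botfactory(settings, ['remind'])          # autoloads 'coretasks' plus 'remind'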
15,778 | def round_state(state: float | None) -> float | None:
"""Round state."""
if isinstance(state, float):
return round(state)
return state
| def round_state(state: float | None) -> float | None:
"""Round state."""
return round(state) if state is not None else None
|
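One behavioural note that applies to both variants above: the built-in round() does banker's rounding, so half-values go to the nearest even integer.

assert round(2.5) == 2 and round(3.5) == 4   # ties round to the even integer
assert round_state(3.6) == 4
assert round_state(None) is None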
30,151 | def fetch_consumption(zone_key='KW', session=None, logger=None):
r = session or requests.session()
url = 'https://www.mew.gov.kw/en'
response = r.get(url)
load = re.findall(r"\((\d{4,5})\)", response.text)
load = int(load[0])
consumption = {}
consumption['unknown'] = load
datapoint = {
'zoneKey': zone_key,
'datetime': arrow.now('Asia/Kuwait').datetime,
'consumption': consumption,
'source': 'mew.gov.kw'
}
return datapoint
| def fetch_consumption(zone_key='KW', session=None, logger=None):
r = session or requests.session()
url = 'https://www.mew.gov.kw/en'
response = r.get(url)
load = re.findall(r"\((\d{4,5})\)", response.text)
load = int(load[0])
consumption = load
datapoint = {
'zoneKey': zone_key,
'datetime': arrow.now('Asia/Kuwait').datetime,
'consumption': consumption,
'source': 'mew.gov.kw'
}
return datapoint
|
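For clarity, a small self-contained check of the regex used above; the sample string is invented and is not the real markup of mew.gov.kw.

import re
sample = "Current load is (12345) MW"          # invented snippet, not the live page
values = re.findall(r"\((\d{4,5})\)", sample)  # capture 4-5 digit numbers in parentheses
assert int(values[0]) == 12345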
46,547 | def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard, valid=True):
transition_to(spec, state, state.slot + target_len_offset_slot)
body = get_sample_shard_block_body(spec, is_max=True)
shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestations = [
get_valid_on_time_attestation(
spec,
state,
index=committee_index,
shard_transition=shard_transitions[shard],
signed=True,
)
for shard in shard_block_dict.keys()
]
beacon_block = build_empty_block(spec, state, slot=state.slot + 1)
beacon_block.body.attestations = attestations
beacon_block.body.shard_transitions = shard_transitions
pre_gasprice = state.shard_states[shard].gasprice
pre_shard_states = state.shard_states.copy()
yield 'pre', state.copy()
if not valid:
state_transition_and_sign_block(spec, state, beacon_block, expect_fail=True)
yield 'block', beacon_block
yield 'post', None
return
else:
state_transition_and_sign_block(spec, state, beacon_block)
yield 'block', beacon_block
yield 'post', None
for shard in range(spec.get_active_shard_count(state)):
post_shard_state = state.shard_states[shard]
if shard in shard_block_dict:
# Shard state has been changed to state_transition result
assert post_shard_state == shard_transitions[shard].shard_states[
len(shard_transitions[shard].shard_states) - 1
]
assert post_shard_state.slot == state.slot - 1
if len((shard_block_dict[shard])) == 0:
# `latest_block_root` is the same
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
if target_len_offset_slot == 1 and len(shard_block_dict[shard]) > 0:
assert post_shard_state.gasprice > pre_gasprice
| def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard, valid=True):
transition_to(spec, state, state.slot + target_len_offset_slot)
body = get_sample_shard_block_body(spec, is_max=True)
shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestations = [
get_valid_on_time_attestation(
spec,
state,
index=committee_index,
shard_transition=shard_transitions[shard],
signed=True,
)
for shard in shard_block_dict.keys()
]
beacon_block = build_empty_block(spec, state, slot=state.slot + 1)
beacon_block.body.attestations = attestations
beacon_block.body.shard_transitions = shard_transitions
pre_gasprice = state.shard_states[shard].gasprice
pre_shard_states = state.shard_states.copy()
yield 'pre', state.copy()
if not valid:
state_transition_and_sign_block(spec, state, beacon_block, expect_fail=True)
yield 'block', beacon_block
yield 'post', None
return
else:
state_transition_and_sign_block(spec, state, beacon_block)
yield 'block', beacon_block
yield 'post', state
for shard in range(spec.get_active_shard_count(state)):
post_shard_state = state.shard_states[shard]
if shard in shard_block_dict:
# Shard state has been changed to state_transition result
assert post_shard_state == shard_transitions[shard].shard_states[
len(shard_transitions[shard].shard_states) - 1
]
assert post_shard_state.slot == state.slot - 1
if len((shard_block_dict[shard])) == 0:
# `latest_block_root` is the same
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
if target_len_offset_slot == 1 and len(shard_block_dict[shard]) > 0:
assert post_shard_state.gasprice > pre_gasprice
|
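For context, a hedged sketch of how a yielding spec-test helper like the one above is usually consumed; the tiny driver below is illustrative only and not the actual eth2.0-specs test runner.

def collect_test_parts(gen):
    # Drain the generator into a {name: value} mapping ('pre', 'block', 'post', ...).
    return {name: value for name, value in gen}

# Requires real `spec`/`state` fixtures, so the call is shown commented out:
# parts = collect_test_parts(
#     run_beacon_block_with_shard_blocks(spec, state, 1, committee_index=0, shard=0))
# parts['pre'], parts['block'], parts['post']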
956 | def solve_undetermined_coeffs(equ, coeffs, *syms, **flags):
r"""
Solve equation of a type $p(x; a_1, \ldots, a_k) = q(x)$ where both
$p$ and $q$ are expressions that depend on $k$ parameters
in a linear fashion and $x$ represents one or more variables appearing
in some functional form in the equation.
Explanation
===========
The result of this function is a dictionary with symbolic values of those
parameters with respect to coefficients in $q$, an empty list if there
is no solution, else None if the system was not recognized.
This function accepts both equations class instances and ordinary
SymPy expressions. The solving process is most efficient when
symbols are specified in addition to parameters to be determined,
but an attempt to determine them (if absent) will be made. If a solution
is not obtained when expected, and symbols were not specified, try
specifying the symbols.
Examples
========
>>> from sympy import Eq, solve_undetermined_coeffs
>>> from sympy.abc import a, b, c, h, p, k, x
>>> solve_undetermined_coeffs(Eq(a*x + a + b, x/2), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(a - 2, [a])
{a: 2}
The equation can be nonlinear in the symbols:
>>> X, Y, Z = y, x**y, y*x**y
>>> eq = a*X + b*Y + c*Z - X - 2*Y - 3*Z
>>> coeffs = a, b, c
>>> syms = x, y
>>> solve_undetermined_coeffs(eq, coeffs, syms)
{a: 1, b: 2, c: 3}
And the system can be nonlinear in coefficients provided that
there is a single solution:
>>> eq = a*x**2 + b*x + c - ((x - h)**2 + 4*p*k)/4/p
>>> solve_undetermined_coeffs(eq, (h, p, k), x)
{h: -b/(2*a), k: (4*a*c - b**2)/(4*a), p: 1/(4*a)}
The following, however, has two solutions so None is returned:
>>> None is solve_undetermined_coeffs(a**2*x + b - x, [a, b], x)
True
"""
if not (coeffs and all(i.is_Symbol for i in coeffs)):
raise ValueError('must provide symbols for coeffs')
if isinstance(equ, Eq):
eq = equ.lhs - equ.rhs
else:
eq = equ
eq = _mexpand(cancel(eq).as_numer_denom()[0], recursive=True)
coeffs = eq.free_symbols & set(coeffs)
if not coeffs:
return
if not syms:
# e.g. A*exp(x) + B - (exp(x) + y) separated into parts that
# don't/do depend on coeffs gives
# -(exp(x) + y), A*exp(x) + B
# then see what symbols are common to both
# {x} = {x, A, B} - {x, y}
ind, dep = eq.as_independent(*coeffs, as_Add=True)
dfree = dep.free_symbols
syms = dfree & ind.free_symbols
if not syms:
# but if the system looks like (a + b)*x + b - c
# then {} = {a, b, x} - c
# so calculate {x} = {a, b, x} - {a, b}
syms = dfree - set(coeffs)
if not syms:
syms = [Dummy()]
sym_dict = {}
else:
if len(syms) == 1 and iterable(syms[0]):
syms = syms[0]
e, s, sym_dict = recast_to_symbols([eq], syms)
eq = e[0]
syms = s
# find the functional forms in which symbols appear
gens = set(eq.as_coefficients_dict(*syms).keys()) - {1}
# make sure we are working with symbols for generators
e, s, d = recast_to_symbols([eq], list(gens))
eq = e[0]
gens = s
# collect coefficients in front of generators
system = list(collect(eq, gens, evaluate=False).values())
# get a solution
_flags = dict(flags, set=None, dict=True)
soln = solve(system, coeffs, **_flags)
if not soln:
return soln
elif len(soln) > 1:
return # not recognized
else:
soln = soln[0]
# simplify because this flag is ignored by solve for a linear system
if flags.get('simplify', True):
soln = {sym_dict.get(k, k): v.simplify()
for k, v in soln.items()}
# done
return soln
| def solve_undetermined_coeffs(equ, coeffs, *syms, **flags):
r"""
Solve equation of a type $p(x; a_1, \ldots, a_k) = q(x)$ where both
$p$ and $q$ are expressions that depend on $k$ parameters
in a linear fashion and $x$ represents one or more variables appearing
in some functional form in the equation.
Explanation
===========
The result of this function is a dictionary with symbolic values of those
parameters with respect to coefficients in $q$, an empty list if there
is no solution, else None if the system was not recognized.
This function accepts both equations class instances and ordinary
SymPy expressions. The solving process is most efficient when
symbols are specified in addition to parameters to be determined,
but an attempt to determine them (if absent) will be made. If a solution
is not obtained when expected, and symbols were not specified, try
specifying the symbols.
Examples
========
>>> from sympy import Eq, solve_undetermined_coeffs
>>> from sympy.abc import a, b, c, h, p, k, x, y
>>> solve_undetermined_coeffs(Eq(a*x + a + b, x/2), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(a - 2, [a])
{a: 2}
The equation can be nonlinear in the symbols:
>>> X, Y, Z = y, x**y, y*x**y
>>> eq = a*X + b*Y + c*Z - X - 2*Y - 3*Z
>>> coeffs = a, b, c
>>> syms = x, y
>>> solve_undetermined_coeffs(eq, coeffs, syms)
{a: 1, b: 2, c: 3}
And the system can be nonlinear in coefficients provided that
there is a single solution:
>>> eq = a*x**2 + b*x + c - ((x - h)**2 + 4*p*k)/4/p
>>> solve_undetermined_coeffs(eq, (h, p, k), x)
{h: -b/(2*a), k: (4*a*c - b**2)/(4*a), p: 1/(4*a)}
The following, however, has two solutions so None is returned:
>>> None is solve_undetermined_coeffs(a**2*x + b - x, [a, b], x)
True
"""
if not (coeffs and all(i.is_Symbol for i in coeffs)):
raise ValueError('must provide symbols for coeffs')
if isinstance(equ, Eq):
eq = equ.lhs - equ.rhs
else:
eq = equ
eq = _mexpand(cancel(eq).as_numer_denom()[0], recursive=True)
coeffs = eq.free_symbols & set(coeffs)
if not coeffs:
return
if not syms:
# e.g. A*exp(x) + B - (exp(x) + y) separated into parts that
# don't/do depend on coeffs gives
# -(exp(x) + y), A*exp(x) + B
# then see what symbols are common to both
# {x} = {x, A, B} - {x, y}
ind, dep = eq.as_independent(*coeffs, as_Add=True)
dfree = dep.free_symbols
syms = dfree & ind.free_symbols
if not syms:
# but if the system looks like (a + b)*x + b - c
# then {} = {a, b, x} - c
# so calculate {x} = {a, b, x} - {a, b}
syms = dfree - set(coeffs)
if not syms:
syms = [Dummy()]
sym_dict = {}
else:
if len(syms) == 1 and iterable(syms[0]):
syms = syms[0]
e, s, sym_dict = recast_to_symbols([eq], syms)
eq = e[0]
syms = s
# find the functional forms in which symbols appear
gens = set(eq.as_coefficients_dict(*syms).keys()) - {1}
# make sure we are working with symbols for generators
e, s, d = recast_to_symbols([eq], list(gens))
eq = e[0]
gens = s
# collect coefficients in front of generators
system = list(collect(eq, gens, evaluate=False).values())
# get a solution
_flags = dict(flags, set=None, dict=True)
soln = solve(system, coeffs, **_flags)
if not soln:
return soln
elif len(soln) > 1:
return # not recognized
else:
soln = soln[0]
# simplify because this flag is ignored by solve for a linear system
if flags.get('simplify', True):
soln = {sym_dict.get(k, k): v.simplify()
for k, v in soln.items()}
# done
return soln
|
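A minimal re-derivation of the core idea using only public SymPy calls (collect the coefficient of each generator and solve the resulting linear system); this is a sketch, not the internal code path of solve_undetermined_coeffs.

from sympy import Rational, collect, expand, solve, symbols

a, b, x = symbols('a b x')
eq = expand(a*x + a + b - x/2)                            # p(x; a, b) - q(x)
system = list(collect(eq, [x], evaluate=False).values())  # coefficient of x and the constant term
assert solve(system, [a, b]) == {a: Rational(1, 2), b: Rational(-1, 2)}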
2,682 | def fetch_covtype(
*,
data_home=None,
download_if_missing=True,
random_state=None,
shuffle=False,
return_X_y=False,
as_frame=False,
):
"""Load the covertype dataset (classification).
Download it if necessary.
================= ============
Classes 7
Samples total 581012
Dimensionality 54
Features int
================= ============
Read more in the :ref:`User Guide <covtype_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is a pandas DataFrame or
Series depending on the number of target columns. If `return_X_y` is
True, then (`data`, `target`) will be pandas DataFrames or Series as
described below.
.. versionadded:: 0.24
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
target : ndarray of shape (581012,)
Each value corresponds to one of
the 7 forest covertypes with values
            ranging from 1 to 7.
frame : dataframe of shape (581012, 55)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
Description of the forest covertype dataset.
feature_names : list
The names of the dataset columns.
target_names: list
The names of the target columns.
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path) and exists(targets_path)
if download_if_missing and not available:
os.makedirs(covtype_dir, exist_ok=True)
# Creating temp_dir as a direct subdirectory of the target directory
# guarantees that both reside on the same filesystem, so that we can use
# os.rename to atomically move the data files to their target location.
with TemporaryDirectory(dir=covtype_dir) as temp_dir:
logger.info("Downloading %s" % ARCHIVE.url)
archive_path = _fetch_remote(ARCHIVE, dirname=temp_dir)
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",")
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32, copy=False)
samples_tmp_path = _pkl_filepath(temp_dir, "samples")
joblib.dump(X, samples_tmp_path, compress=9)
os.rename(samples_tmp_path, samples_path)
targets_tmp_path = _pkl_filepath(temp_dir, "targets")
joblib.dump(y, targets_tmp_path, compress=9)
os.rename(targets_tmp_path, targets_path)
elif not available and not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
fdescr = load_descr("covtype.rst")
frame = None
if as_frame:
frame, X, y = _convert_data_dataframe(
caller_name="fetch_covtype",
data=X,
target=y,
feature_names=FEATURE_NAMES,
target_names=TARGET_NAMES,
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
frame=frame,
target_names=TARGET_NAMES,
feature_names=FEATURE_NAMES,
DESCR=fdescr,
)
| def fetch_covtype(
*,
data_home=None,
download_if_missing=True,
random_state=None,
shuffle=False,
return_X_y=False,
as_frame=False,
):
"""Load the covertype dataset (classification).
Download it if necessary.
================= ============
Classes 7
Samples total 581012
Dimensionality 54
Features int
================= ============
Read more in the :ref:`User Guide <covtype_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is a pandas DataFrame or
Series depending on the number of target columns. If `return_X_y` is
True, then (`data`, `target`) will be pandas DataFrames or Series as
described below.
.. versionadded:: 0.24
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
target : ndarray of shape (581012,)
Each value corresponds to one of
the 7 forest covertypes with values
            ranging from 1 to 7.
frame : dataframe of shape (581012, 55)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
Description of the forest covertype dataset.
feature_names : list
The names of the dataset columns.
target_names: list
The names of the target columns.
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path) and exists(targets_path)
if download_if_missing and not available:
os.makedirs(covtype_dir, exist_ok=True)
# Creating temp_dir as a direct subdirectory of the target directory
# guarantees that both reside on the same filesystem, so that we can use
# os.rename to atomically move the data files to their target location.
with TemporaryDirectory(dir=covtype_dir) as temp_dir:
logger.info(f"Downloading {ARCHIVE.url}")
archive_path = _fetch_remote(ARCHIVE, dirname=temp_dir)
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",")
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32, copy=False)
samples_tmp_path = _pkl_filepath(temp_dir, "samples")
joblib.dump(X, samples_tmp_path, compress=9)
os.rename(samples_tmp_path, samples_path)
targets_tmp_path = _pkl_filepath(temp_dir, "targets")
joblib.dump(y, targets_tmp_path, compress=9)
os.rename(targets_tmp_path, targets_path)
elif not available and not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
fdescr = load_descr("covtype.rst")
frame = None
if as_frame:
frame, X, y = _convert_data_dataframe(
caller_name="fetch_covtype",
data=X,
target=y,
feature_names=FEATURE_NAMES,
target_names=TARGET_NAMES,
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
frame=frame,
target_names=TARGET_NAMES,
feature_names=FEATURE_NAMES,
DESCR=fdescr,
)
|
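A hedged usage sketch for the loader above; the first call downloads the dataset and caches it under the scikit-learn data home, so it can take a while.

from sklearn.datasets import fetch_covtype

X, y = fetch_covtype(return_X_y=True, shuffle=True, random_state=0)
print(X.shape, y.shape)   # (581012, 54) (581012,)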
41,395 | def __render_team_text(canvas, layout, colors, team, homeaway, full_team_names, default_colors, short_team_names_for_runs_hits, vs_team):
text_color = colors.get("text", default_colors["text"])
text_color_graphic = graphics.Color(text_color["r"], text_color["g"], text_color["b"])
coords = layout.coords("teams.name.{}".format(homeaway))
font = layout.font("teams.name.{}".format(homeaway))
team_text = "{:3s}".format(team.abbrev.upper())
if full_team_names and canvas.width > 32 and not (short_team_names_for_runs_hits and (team.runs > 9 or team.hits > 9 or vs_team.runs > 9 or vs_team.hits > 9)):
team_text = "{:13s}".format(team.name)
graphics.DrawText(canvas, font["font"], coords["x"], coords["y"], text_color_graphic, team_text)
| def __render_team_text(canvas, layout, colors, team, homeaway, full_team_names, default_colors, short_team_names_for_runs_hits, vs_team):
text_color = colors.get("text", default_colors["text"])
text_color_graphic = graphics.Color(text_color["r"], text_color["g"], text_color["b"])
coords = layout.coords("teams.name.{}".format(homeaway))
font = layout.font("teams.name.{}".format(homeaway))
team_text = "{:3s}".format(team.abbrev.upper())
    if full_team_names:
team_text = "{:13s}".format(team.name)
graphics.DrawText(canvas, font["font"], coords["x"], coords["y"], text_color_graphic, team_text)
|
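A small aside on the formatting used in both variants above: '{:3s}' and '{:13s}' left-justify and pad the team text to a fixed width, which keeps the rendered column aligned on the matrix.

assert "{:3s}".format("NYM") == "NYM"
assert "{:13s}".format("Mets") == "Mets" + " " * 9   # padded out to 13 characters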
25,908 | def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
from msrestazure.tools import is_valid_resource_id
if scope.startswith('/subscriptions/') and not is_valid_resource_id(scope):
raise CLIError('Invalid scope. Please use --help to view the valid format.')
elif scope == '':
raise CLIError('Invalid scope. Please use --help to view the valid format.')
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
| def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
from azure.mgmt.core.tools import is_valid_resource_id
if scope.startswith('/subscriptions/') and not is_valid_resource_id(scope):
raise CLIError('Invalid scope. Please use --help to view the valid format.')
elif scope == '':
raise CLIError('Invalid scope. Please use --help to view the valid format.')
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
|
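A quick behavioural sketch of _build_role_scope; these two branches need no Azure SDK at all, since the resource-id validation import only happens when an explicit scope is supplied.

sub_id = '00000000-0000-0000-0000-000000000000'
assert _build_role_scope('myrg', None, sub_id) == \
    '/subscriptions/' + sub_id + '/resourceGroups/myrg'
assert _build_role_scope(None, None, sub_id) == '/subscriptions/' + sub_id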
22,715 | def main():
print('Gather runtime data')
try:
subprocess.check_output(['choco', '--version'])
except subprocess.CalledProcessError:
raise RuntimeError('Error: Chocolatey (https://chocolatey.org/) needs'
'to be installed to run this script.')
script_path = os.path.realpath(__file__)
repo_path = os.path.dirname(os.path.dirname(script_path))
build_path = os.path.join(repo_path, 'windows-installer', 'build')
venv_path = os.path.join(build_path, 'venv-config')
venv_python = os.path.join(venv_path, 'Scripts', 'python.exe')
installer_cfg_path = os.path.join(build_path, 'installer.cfg')
wheels_path = os.path.join(build_path, 'wheels')
certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'],
universal_newlines=True, cwd=repo_path).strip()
certbot_packages = ['acme', '.']
certbot_packages.extend([name for name in os.listdir(repo_path) if name.startswith('certbot-dns-')])
print('Copy assets')
os.makedirs(build_path, exist_ok=True)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'certbot.ico'), build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'run.py'), build_path)
print('Prepare pynsist config')
with open(os.path.join(installer_cfg_path), 'w') as file_h:
file_h.write("""\
[Application]
name=Certbot
version={certbot_version}
icon=certbot.ico
publisher=Electronic Frontier Foundation
script=run.py
[Build]
directory=nsis
installer_name=certbot-{certbot_version}-win32_install.exe
[Python]
version=3.7.0
[Include]
local_wheels=wheels\*.whl
[Command certbot]
entry_point=certbot.main:main
""".format(certbot_version=certbot_version))
print('Prepare build environment')
subprocess.check_call([sys.executable, '-m', 'venv', '--clear', venv_path])
subprocess.check_call(['choco', 'upgrade', '-y', 'nsis'])
subprocess.check_call([venv_python, '-m', 'pip', 'install', '--upgrade', 'pip'])
shutil.rmtree(wheels_path, ignore_errors=True)
os.makedirs(wheels_path, exist_ok=True)
subprocess.check_call([venv_python, '-m', 'pip', 'install', 'wheel', 'pynsist'])
print('Compile wheels')
wheels_project = [os.path.join(repo_path, package) for package in certbot_packages]
command = [venv_python, '-m', 'pip', 'wheel', '-w', wheels_path]
command.extend(wheels_project)
subprocess.check_call(command)
print('Build the installer')
subprocess.check_call([os.path.join(venv_path, 'Scripts', 'pynsist.exe'), installer_cfg_path])
print('Done')
| def main():
print('Gather runtime data')
try:
subprocess.check_output(['choco', '--version'])
except subprocess.CalledProcessError:
raise RuntimeError('Error: Chocolatey (https://chocolatey.org/) needs'
' to be installed to run this script.')
script_path = os.path.realpath(__file__)
repo_path = os.path.dirname(os.path.dirname(script_path))
build_path = os.path.join(repo_path, 'windows-installer', 'build')
venv_path = os.path.join(build_path, 'venv-config')
venv_python = os.path.join(venv_path, 'Scripts', 'python.exe')
installer_cfg_path = os.path.join(build_path, 'installer.cfg')
wheels_path = os.path.join(build_path, 'wheels')
certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'],
universal_newlines=True, cwd=repo_path).strip()
certbot_packages = ['acme', '.']
certbot_packages.extend([name for name in os.listdir(repo_path) if name.startswith('certbot-dns-')])
print('Copy assets')
os.makedirs(build_path, exist_ok=True)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'certbot.ico'), build_path)
shutil.copy(os.path.join(repo_path, 'windows-installer', 'run.py'), build_path)
print('Prepare pynsist config')
with open(os.path.join(installer_cfg_path), 'w') as file_h:
file_h.write("""\
[Application]
name=Certbot
version={certbot_version}
icon=certbot.ico
publisher=Electronic Frontier Foundation
script=run.py
[Build]
directory=nsis
installer_name=certbot-{certbot_version}-win32_install.exe
[Python]
version=3.7.0
[Include]
local_wheels=wheels\*.whl
[Command certbot]
entry_point=certbot.main:main
""".format(certbot_version=certbot_version))
print('Prepare build environment')
subprocess.check_call([sys.executable, '-m', 'venv', '--clear', venv_path])
subprocess.check_call(['choco', 'upgrade', '-y', 'nsis'])
subprocess.check_call([venv_python, '-m', 'pip', 'install', '--upgrade', 'pip'])
shutil.rmtree(wheels_path, ignore_errors=True)
os.makedirs(wheels_path, exist_ok=True)
subprocess.check_call([venv_python, '-m', 'pip', 'install', 'wheel', 'pynsist'])
print('Compile wheels')
wheels_project = [os.path.join(repo_path, package) for package in certbot_packages]
command = [venv_python, '-m', 'pip', 'wheel', '-w', wheels_path]
command.extend(wheels_project)
subprocess.check_call(command)
print('Build the installer')
subprocess.check_call([os.path.join(venv_path, 'Scripts', 'pynsist.exe'), installer_cfg_path])
print('Done')
|
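One note on the pynsist template embedded in both variants above: the \* in local_wheels=wheels\*.whl is not a recognised Python escape, so the backslash is kept as-is, but newer interpreters warn about such sequences; a raw string sidesteps the warning.

assert "wheels\*.whl" == r"wheels\*.whl"   # same contents; the raw string merely avoids the escape warning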
43,082 | def generate_code(prog, eng=None):
"""Converts a Strawberry Fields program into valid Strawberry Fields code.
**Example:**
.. code-block:: python3
prog = sf.Program(3)
eng = sf.Engine("fock", backend_options={"cutoff_dim": 5})
with prog.context as q:
ops.Sgate(2*np.pi/3) | q[1]
ops.BSgate(0.6, 0.1) | (q[2], q[0])
ops.MeasureFock() | q
results = eng.run(prog)
code_str = sf.io.generate_code(prog, eng=eng)
This will create the following string:
.. code-block:: pycon
>>> print(code_str)
import strawberryfields as sf
from strawberryfields import ops
prog = sf.Program(3)
eng = sf.Engine("fock", backend_options={"cutoff_dim": 5})
with prog.context as q:
ops.Sgate(2*np.pi/3, 0.0) | q[1]
ops.BSgate(0.6, 0.1) | (q[2], q[0])
ops.MeasureFock() | (q[0], q[1], q[2])
results = eng.run(prog)
Args:
prog (Program): the Strawberry Fields program
eng (Engine): The Strawberryfields engine. If ``None``, only the Program
parts will be in the resulting code-string.
Returns:
str: the Strawberry Fields code, for constructing the program, as a string
"""
code_seq = ["import strawberryfields as sf", "from strawberryfields import ops\n"]
if isinstance(prog, TDMProgram):
code_seq.append(f"prog = sf.TDMProgram(N={prog.N})")
else:
code_seq.append(f"prog = sf.Program({prog.num_subsystems})")
# check if an engine is supplied; if so, format and add backend/target
# along with backend options
if eng:
eng_type = eng.__class__.__name__
if eng_type == "RemoteEngine":
code_seq.append(f'eng = sf.RemoteEngine("{eng.target}")')
else:
if "cutoff_dim" in eng.backend_options:
formatting_str = (
f'"{eng.backend_name}", backend_options='
+ f'{{"cutoff_dim": {eng.backend_options["cutoff_dim"]}}}'
)
code_seq.append(f"eng = sf.Engine({formatting_str})")
else:
code_seq.append(f'eng = sf.Engine("{eng.backend_name}")')
# check if program is a TDMProgram and format the context as appropriate
if isinstance(prog, TDMProgram):
# if the context arrays contain pi-values, factor out multiples of np.pi
tdm_params = [f"[{_factor_out_pi(par)}]" for par in prog.tdm_params]
code_seq.append("\nwith prog.context(" + ", ".join(tdm_params) + ") as (p, q):")
else:
code_seq.append("\nwith prog.context as q:")
# add the operations, and replace any free parameters with e.g. `p[0]`, `p[1]`
for cmd in prog.circuit or []:
name = cmd.op.__class__.__name__
if isinstance(prog, TDMProgram):
format_dict = {k: f"p[{k[1:]}]" for k in prog.parameters.keys()}
params_str = _factor_out_pi(cmd.op.p).format(**format_dict)
else:
params_str = _factor_out_pi(cmd.op.p)
modes = [f"q[{r.ind}]" for r in cmd.reg]
if len(modes) == 1:
modes_str = ", ".join(modes)
else:
modes_str = "(" + ", ".join(modes) + ")"
op = f" ops.{name}({params_str}) | {modes_str}"
code_seq.append(op)
if eng:
code_seq.append("\nresults = eng.run(prog)")
return "\n".join(code_seq)
| def generate_code(prog: Program, eng: Optional[Engine] = None) -> str:
"""Converts a Strawberry Fields program into valid Strawberry Fields code.
**Example:**
.. code-block:: python3
prog = sf.Program(3)
eng = sf.Engine("fock", backend_options={"cutoff_dim": 5})
with prog.context as q:
ops.Sgate(2*np.pi/3) | q[1]
ops.BSgate(0.6, 0.1) | (q[2], q[0])
ops.MeasureFock() | q
results = eng.run(prog)
code_str = sf.io.generate_code(prog, eng=eng)
This will create the following string:
.. code-block:: pycon
>>> print(code_str)
import strawberryfields as sf
from strawberryfields import ops
prog = sf.Program(3)
eng = sf.Engine("fock", backend_options={"cutoff_dim": 5})
with prog.context as q:
ops.Sgate(2*np.pi/3, 0.0) | q[1]
ops.BSgate(0.6, 0.1) | (q[2], q[0])
ops.MeasureFock() | (q[0], q[1], q[2])
results = eng.run(prog)
Args:
prog (Program): the Strawberry Fields program
eng (Engine): The Strawberryfields engine. If ``None``, only the Program
parts will be in the resulting code-string.
Returns:
str: the Strawberry Fields code, for constructing the program, as a string
"""
code_seq = ["import strawberryfields as sf", "from strawberryfields import ops\n"]
if isinstance(prog, TDMProgram):
code_seq.append(f"prog = sf.TDMProgram(N={prog.N})")
else:
code_seq.append(f"prog = sf.Program({prog.num_subsystems})")
# check if an engine is supplied; if so, format and add backend/target
# along with backend options
if eng:
eng_type = eng.__class__.__name__
if eng_type == "RemoteEngine":
code_seq.append(f'eng = sf.RemoteEngine("{eng.target}")')
else:
if "cutoff_dim" in eng.backend_options:
formatting_str = (
f'"{eng.backend_name}", backend_options='
+ f'{{"cutoff_dim": {eng.backend_options["cutoff_dim"]}}}'
)
code_seq.append(f"eng = sf.Engine({formatting_str})")
else:
code_seq.append(f'eng = sf.Engine("{eng.backend_name}")')
# check if program is a TDMProgram and format the context as appropriate
if isinstance(prog, TDMProgram):
# if the context arrays contain pi-values, factor out multiples of np.pi
tdm_params = [f"[{_factor_out_pi(par)}]" for par in prog.tdm_params]
code_seq.append("\nwith prog.context(" + ", ".join(tdm_params) + ") as (p, q):")
else:
code_seq.append("\nwith prog.context as q:")
# add the operations, and replace any free parameters with e.g. `p[0]`, `p[1]`
for cmd in prog.circuit or []:
name = cmd.op.__class__.__name__
if isinstance(prog, TDMProgram):
format_dict = {k: f"p[{k[1:]}]" for k in prog.parameters.keys()}
params_str = _factor_out_pi(cmd.op.p).format(**format_dict)
else:
params_str = _factor_out_pi(cmd.op.p)
modes = [f"q[{r.ind}]" for r in cmd.reg]
if len(modes) == 1:
modes_str = ", ".join(modes)
else:
modes_str = "(" + ", ".join(modes) + ")"
op = f" ops.{name}({params_str}) | {modes_str}"
code_seq.append(op)
if eng:
code_seq.append("\nresults = eng.run(prog)")
return "\n".join(code_seq)
|
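A hedged usage sketch for generate_code: the returned string is plain Python, so it can be written to a file (or exec'd) to reconstruct the program later.

code_str = generate_code(prog, eng=eng)          # prog/eng as in the docstring example above
with open("reconstructed_program.py", "w") as fh:
    fh.write(code_str + "\n")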
25,942 | def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes,', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
        c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.')
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
        # here we collapse all different image sources under 2 common arguments: --os-disk-source and --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use a singular value to apply to all disks, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' to update the OS disk and two data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM.')
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01',
help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot. It is part of trusted launch.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM. It is part of trusted launch.')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
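# Hedged example of the open-port arguments above (names and values are placeholders):
#   az vm open-port -g MyResourceGroup -n MyVm --port 80-100 --priority 900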
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run more slowly.')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypt-format data disks instead of encrypting them in place. Encrypt-formatting is much faster than in-place encryption but wipes out the partition being encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs, for example as produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/en-us/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts that are chosen by Azure, under the dedicated host group. Defaults to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
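# Hedged example exercising the dedicated host arguments above (group, host name and SKU are placeholders):
#   az vm host group create -g MyResourceGroup -n MyHostGroup -c 2
#   az vm host create -g MyResourceGroup --host-group MyHostGroup -n MyHost --sku DSv3-Type1 -d 0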
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Defaults to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="Domain name of VM instances. Once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) for which a terminate notification is sent to the VM on the instance metadata server before the VM is deleted')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
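# Illustrative sketch of a minimal scale-set creation using the arguments above
# (image alias and names are placeholders):
#   az vmss create -g MyResourceGroup -n MyScaleSet --image UbuntuLTS \
#       --instance-count 2 --vm-sku Standard_DS1_v2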
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="Accept system or user assigned identities separated by spaces. Use '[system]' to refer to the system assigned identity, or a resource ID to refer to a user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default.', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='Version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"Remember that after setting the license type on VMSs, execute the following command to apply the changes to the existing VM." \
"`az vmss update-instances -g 'Your resource group name' -n 'Your VMSS name' --instance-ids '*' `" \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='Image name (if in the same resource group) or resource ID')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed without specifying an image version will not use this image version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
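# Hedged example for the Shared Image Gallery version arguments above (all names are placeholders):
#   az sig image-version create -g MyResourceGroup -r MyGallery -i MyImageDefinition \
#       -e 1.0.0 --managed-image MyManagedImage --target-regions westus=1 eastus=2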
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
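# Illustrative sketch of the proximity placement group arguments (placeholders throughout):
#   az ppg create -g MyResourceGroup -n MyPpg -t Standard
#   az vm create -g MyResourceGroup -n MyVm --image UbuntuLTS --ppg MyPpg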
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter is used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
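# Illustrative usage sketch (not part of the original file; resource names and metric values are assumptions):
#   az vm monitor metrics tail -g MyResourceGroup -n MyVm --metrics "Percentage CPU" --aggregation Average
#   az vm monitor metrics list-definitions -g MyResourceGroup -n MyVm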
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
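# Illustrative usage sketch (not part of the original file; names and the key URL are assumptions):
#   az disk-encryption-set create -g MyResourceGroup -n MyDiskEncryptionSet \
#       --key-url https://myvault.vault.azure.net/keys/mykey/<version> --source-vault MyVault \
#       --encryption-type EncryptionAtRestWithCustomerKey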
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
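# Illustrative usage sketch (not part of the original file; names and location are assumptions):
#   az disk-access create -g MyResourceGroup -n MyDiskAccess -l westus2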
# endregion
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
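# Illustrative note (not part of the original file): the reusable CLIArgumentType objects above are
# applied per command context further below, e.g. a hypothetical
#   with self.argument_context('vm start') as c:
#       c.argument('vm_name', existing_vm_name)
# so a single help string, completer and id_part follows the argument in every command that takes it.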
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.')
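# Illustrative usage sketch (not part of the original file; names, sizes and SKUs are assumptions):
#   az disk create -g MyResourceGroup -n MyDataDisk --size-gb 128 --sku Premium_LRS
#   az disk create -g MyResourceGroup -n MyUltraDisk --size-gb 1024 --sku UltraSSD_LRS \
#       --disk-iops-read-write 5000 --disk-mbps-read-write 200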
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
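# Illustrative usage sketch (not part of the original file; names are assumptions):
#   az snapshot create -g MyResourceGroup -n MySnapshot --source MyManagedDisk --incremental true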
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all the different image sources under 2 common arguments: --os-disk-source and --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
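# Illustrative usage sketch (not part of the original file; names are assumptions):
#   az image create -g MyResourceGroup -n MyImage --source MyVm --hyper-v-generation V2 --zone-resilient true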
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_output_type)
c.argument('shared_image_destinations', arg_type=ib_output_type)
c.argument('output_name', arg_type=ib_output_type)
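# Illustrative usage sketch (not part of the original file; the URN, script URL, identity and destination are assumptions):
#   az image builder create -g MyResourceGroup -n MyTemplate --identity MyAssignedIdentity \
#       --image-source "Canonical:UbuntuServer:18.04-LTS:latest" \
#       --scripts https://example.com/customize.sh \
#       --managed-image-destinations my_image=westus2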
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
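# Illustrative usage sketch (not part of the original file; names and tags are assumptions):
#   az image builder output add -g MyResourceGroup -n MyTemplate \
#       --managed-image MyCustomizedImage --managed-image-location westus2 --artifact-tags env=dev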
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
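# Illustrative usage sketch (not part of the original file; the customizer type value and URL are assumptions):
#   az image builder customizer add -g MyResourceGroup -n MyTemplate \
#       --customizer-name install-deps --type Shell --script-url https://example.com/install.sh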
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
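# Illustrative usage sketch (not part of the original file; names and counts are assumptions):
#   az vm availability-set create -g MyResourceGroup -n MyAvSet \
#       --platform-fault-domain-count 2 --platform-update-domain-count 5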
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM.')
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01',
help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot. It is part of trusted launch.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM. It is part of trusted launch.')
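# Illustrative usage sketch (not part of the original file; the image reference and names are assumptions):
#   az vm create -g MyResourceGroup -n MyVm --image MyGen2ImageUrnOrId --security-type TrustedLaunch \
#       --enable-secure-boot true --enable-vtpm true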
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
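# Illustrative usage sketch (not part of the original file; port range and priority are assumptions):
#   az vm open-port -g MyResourceGroup -n MyVm --port 80-100 --priority 900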
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run slower.')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/en-us/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
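# Illustrative usage sketch (not part of the original file; the image alias, count and size are assumptions):
#   az vmss create -g MyResourceGroup -n MyScaleSet --image UbuntuLTS \
#       --instance-count 3 --vm-sku Standard_DS2_v2 --upgrade-policy-mode Automatic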
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
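# Illustrative usage sketch (not part of the original file; names and grace period are assumptions):
#   az vmss update -g MyResourceGroup -n MyScaleSet --enable-automatic-repairs true \
#       --automatic-repairs-grace-period 30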
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) for which a notification is sent to the VM on the instance metadata server before the VM gets deleted')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
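# Illustrative usage sketch (not part of the original file; the service name and action values are assumptions):
#   az vmss set-orchestration-service-state -g MyResourceGroup -n MyScaleSet \
#       --service-name AutomaticRepairs --action Suspend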
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
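# Illustrative usage sketch (not part of the original file; identity and resource names are assumptions):
#   az vm identity assign -g MyResourceGroup -n MyVm --identities [system] MyUserAssignedIdentity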
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default.', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
        c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"Remember that after setting the license type on VMSS, execute the following command to apply the changes to the existing VM." \
"`az vmss update-instances -g 'Your resource group name' -n 'Your VMSS name' --instance-ids '*' `" \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
        c.argument('managed_image', help='image name (if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
                   help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
    # endregion
|
54,086 | def aks_reconcile(cmd, client, resource_group_name, name, # pylint: disable=unused-argument
headers, no_wait):
mc = client.get(resource_group_name, name)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
headers=headers)
| def aks_reconcile(cmd, client, resource_group_name, name, # pylint: disable=unused-argument
aks_custom_headers, no_wait):
mc = client.get(resource_group_name, name)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
headers=headers)
|
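The modified version above resolves the request headers through a get_aks_custom_headers helper before calling the SDK. As a rough illustration of what such a helper typically does (a hypothetical parser written for this note, not the aks-preview implementation), a comma-separated Key=Value string can be turned into a headers dict like this:
def parse_custom_headers(header_spec):
    # Turn "Key1=Value1,Key2=Value2" (or None/empty) into a dict of HTTP headers.
    headers = {}
    if not header_spec:
        return headers
    for pair in header_spec.split(','):
        key, _, value = pair.partition('=')
        headers[key.strip()] = value.strip()
    return headers

print(parse_custom_headers("CustomHeader=aks-custom,TraceId=1"))  # {'CustomHeader': 'aks-custom', 'TraceId': '1'}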
33,293 | def user_is_authenticated(user):
if user and hasattr(user, 'is_authenticated'):
if isinstance(user.is_authenticated, Callable):
authenticated = user.is_authenticated()
else:
authenticated = user.is_authenticated
elif user:
authenticated = True
else:
authenticated = False
return authenticated
| def user_is_authenticated(user):
if user and hasattr(user, 'is_authenticated'):
if callable(user.is_authenticated):
authenticated = user.is_authenticated()
else:
authenticated = user.is_authenticated
elif user:
authenticated = True
else:
authenticated = False
return authenticated
|
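The change in this pair swaps an isinstance check against Callable for the callable() builtin; both distinguish the old callable is_authenticated() method from the newer boolean attribute. A small self-contained check of that equivalence:
from collections.abc import Callable

class LegacyUser:
    def is_authenticated(self):  # method: must be called
        return True

class ModernUser:
    is_authenticated = True      # plain boolean attribute

legacy, modern = LegacyUser(), ModernUser()
print(callable(legacy.is_authenticated), isinstance(legacy.is_authenticated, Callable))  # True True
print(callable(modern.is_authenticated), isinstance(modern.is_authenticated, Callable))  # False False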
46,338 | def daal_check_version(rule):
target = tuple(map(int, (dv[0:4], dv[4:8], dv[8:9])))
if not isinstance(rule[0], type(target)):
if rule > target:
return False
else:
for i, rule_item in enumerate(rule):
if rule_item > target:
return False
if rule_item[0]==target[0]:
break
return True
| def daal_check_version(rule):
target = tuple(map(int, (dv[0:4], dv[4:8], dv[8:9])))
if not isinstance(rule[0], type(target)):
if rule > target:
return False
else:
for rule_item in rule:
if rule_item > target:
return False
if rule_item[0]==target[0]:
break
return True
|
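Both versions lean on Python's lexicographic tuple comparison for the version check; only the unused loop index is dropped. For reference, tuples compare element by element from the left:
target = (2021, 200, 0)          # e.g. a (year, update, status) triple
print((2021, 100, 0) > target)   # False: decided by the second element
print((2022, 0, 0) > target)     # True: decided by the first element
print((2021, 200, 1) > target)   # True: falls through to the third element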
45,107 | def with_toloka_client(func: Callable) -> Callable:
"""
Decorator that allows function to pass `secret_name` and `env` args
and operate with `toloka_client` instance.
Args:
    - func (Callable): function that operates with the `toloka_client` argument.
Returns:
- Callable: the wrapper, that takes optional `secret_name` and `env` arguments
and operates with default `toloka_token` if they are not passed.
Example:
>>> @with_toloka_client
... def some_func(toloka_client: TolokaClient):
... toloka_client.create_project(...)
...
>>> some_func() # Use default toloka_client created using TOLOKA_TOKEN secret.
>>> some_func(secret_name='OTHER_ACCOUNT_SECRET') # Allow to pass other secret.
...
"""
def _wrapper(
*args,
secret_name: str = DEFAULT_TOLOKA_SECRET_NAME,
env: str = DEFAULT_TOLOKA_ENV,
**kwargs,
) -> Any:
token = Secret(secret_name).get()
toloka_client = TolokaClient(token, env)
return partial(add_headers("prefect")(func), toloka_client=toloka_client)(
*args, **kwargs
)
return with_updated_signature(
func,
_wrapper,
remove_func_args=("toloka_client",),
add_wrapper_args=("secret_name", "env"),
)
| def with_toloka_client(func: Callable) -> Callable:
"""
Decorator that allows function to pass `secret_name` and `env` args
and operate with `toloka_client` instance.
Args:
    - func (Callable): function that operates with the `toloka_client` argument.
Returns:
- Callable: The wrapper, which takes optional `secret_name` and `env` arguments
and operates with default `toloka_token` if they are not passed.
Example:
>>> @with_toloka_client
... def some_func(toloka_client: TolokaClient):
... toloka_client.create_project(...)
...
>>> some_func() # Use default toloka_client created using TOLOKA_TOKEN secret.
>>> some_func(secret_name='OTHER_ACCOUNT_SECRET') # Allow to pass other secret.
...
"""
def _wrapper(
*args,
secret_name: str = DEFAULT_TOLOKA_SECRET_NAME,
env: str = DEFAULT_TOLOKA_ENV,
**kwargs,
) -> Any:
token = Secret(secret_name).get()
toloka_client = TolokaClient(token, env)
return partial(add_headers("prefect")(func), toloka_client=toloka_client)(
*args, **kwargs
)
return with_updated_signature(
func,
_wrapper,
remove_func_args=("toloka_client",),
add_wrapper_args=("secret_name", "env"),
)
|
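The decorator in this pair pre-binds toloka_client with functools.partial so the wrapped task never receives the extra secret_name/env keywords. A generic sketch of the same argument-injection pattern, using illustrative names rather than the Prefect/Toloka API:
from functools import partial

def with_default_greeting(func):
    # Inject a `greeting` keyword argument that callers may override.
    def _wrapper(*args, greeting="hello", **kwargs):
        return partial(func, greeting=greeting)(*args, **kwargs)
    return _wrapper

@with_default_greeting
def greet(name, greeting):
    return f"{greeting}, {name}!"

print(greet("world"))                 # hello, world!
print(greet("world", greeting="hi"))  # hi, world!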
25,969 | def list_security_tasks(client, resource_group_name=None):
for loc in client.locations.list():
client._config.asc_location = loc.name
if resource_group_name:
return client.tasks.list_by_resource_group(resource_group_name)
return client.tasks.list()
| def list_security_tasks(client, resource_group_name=None):
for loc in client.locations.list():
client._config.asc_location = loc.name # pylint: disable=protected-access
if resource_group_name:
return client.tasks.list_by_resource_group(resource_group_name)
return client.tasks.list()
|
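The only change in this pair is the inline pylint pragma, which suppresses the protected-access warning for that single statement rather than for the whole module. A minimal illustration of the line-scoped comment:
class Client:
    def __init__(self):
        self._config = {}

client = Client()
client._config["asc_location"] = "westeurope"  # pylint: disable=protected-access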
4,464 | def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,
extra_points=None, dig_ch_pos=None,
coord_frame='head'):
"""Construct digitizer info for the info.
Parameters
----------
nasion : array-like | numpy.ndarray, shape (3,) | None
Point designated as the nasion point.
lpa : array-like | numpy.ndarray, shape (3,) | None
Point designated as the left auricular point.
rpa : array-like | numpy.ndarray, shape (3,) | None
Point designated as the right auricular point.
hpi : array-like | numpy.ndarray, shape (n_points, 3) | None
Points designated as head position indicator points.
extra_points : array-like | numpy.ndarray, shape (n_points, 3)
        Points designated as the headshape points.
dig_ch_pos : dict
Dict of EEG channel positions.
coord_frame : str
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space. Defaults to "head".
Returns
-------
dig : list of dicts
A container of DigPoints to be added to the info['dig'].
"""
coord_frame = _coord_frame_const(coord_frame)
dig = []
if lpa is not None:
lpa = np.asarray(lpa)
if lpa.shape != (3,):
raise ValueError('LPA should have the shape (3,) instead of %s'
% (lpa.shape,))
dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': coord_frame})
if nasion is not None:
nasion = np.asarray(nasion)
if nasion.shape != (3,):
raise ValueError('Nasion should have the shape (3,) instead of %s'
% (nasion.shape,))
dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': coord_frame})
if rpa is not None:
rpa = np.asarray(rpa)
if rpa.shape != (3,):
raise ValueError('RPA should have the shape (3,) instead of %s'
% (rpa.shape,))
dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': coord_frame})
if hpi is not None:
hpi = np.asarray(hpi)
if hpi.ndim != 2 or hpi.shape[1] != 3:
raise ValueError('HPI should have the shape (n_points, 3) instead '
'of %s' % (hpi.shape,))
for idx, point in enumerate(hpi):
dig.append({'r': point, 'ident': idx + 1,
'kind': FIFF.FIFFV_POINT_HPI,
'coord_frame': coord_frame})
if extra_points is not None:
extra_points = np.asarray(extra_points)
if len(extra_points) and extra_points.shape[1] != 3:
raise ValueError('Points should have the shape (n_points, 3) '
'instead of %s' % (extra_points.shape,))
for idx, point in enumerate(extra_points):
dig.append({'r': point, 'ident': idx + 1,
'kind': FIFF.FIFFV_POINT_EXTRA,
'coord_frame': coord_frame})
if dig_ch_pos is not None:
try: # use the last 3 as int if possible (e.g., EEG001->1)
idents = []
for key, value in dig_ch_pos.items():
_validate_type(key, str, 'dig_ch_pos')
_validate_type(value, (np.ndarray, list, tuple), 'dig_ch_pos')
if isinstance(value, (list, tuple)):
value = np.array(value)
dig_ch_pos[key] = value
if value.dtype == int or value.dtype == np.float32:
value = value.astype(np.float64)
dig_ch_pos[key] = value
if value.shape != (3, ) or value.dtype != np.float64:
raise RuntimeError("The position should be a 1D array of "
"floats [x, y, z].")
idents.append(int(key[-3:]))
except ValueError: # and if any conversion fails, simply use arange
idents = np.arange(1, len(dig_ch_pos) + 1)
for key, ident in zip(dig_ch_pos, idents):
dig.append({'r': dig_ch_pos[key], 'ident': int(ident),
'kind': FIFF.FIFFV_POINT_EEG,
'coord_frame': coord_frame})
return _format_dig_points(dig)
| def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,
extra_points=None, dig_ch_pos=None,
coord_frame='head'):
"""Construct digitizer info for the info.
Parameters
----------
nasion : array-like | numpy.ndarray, shape (3,) | None
Point designated as the nasion point.
lpa : array-like | numpy.ndarray, shape (3,) | None
Point designated as the left auricular point.
rpa : array-like | numpy.ndarray, shape (3,) | None
Point designated as the right auricular point.
hpi : array-like | numpy.ndarray, shape (n_points, 3) | None
Points designated as head position indicator points.
extra_points : array-like | numpy.ndarray, shape (n_points, 3)
        Points designated as the headshape points.
dig_ch_pos : dict
Dict of EEG channel positions.
coord_frame : str
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space. Defaults to "head".
Returns
-------
dig : list of dicts
A container of DigPoints to be added to the info['dig'].
"""
coord_frame = _coord_frame_const(coord_frame)
dig = []
if lpa is not None:
lpa = np.asarray(lpa)
if lpa.shape != (3,):
raise ValueError('LPA should have the shape (3,) instead of %s'
% (lpa.shape,))
dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': coord_frame})
if nasion is not None:
nasion = np.asarray(nasion)
if nasion.shape != (3,):
raise ValueError('Nasion should have the shape (3,) instead of %s'
% (nasion.shape,))
dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': coord_frame})
if rpa is not None:
rpa = np.asarray(rpa)
if rpa.shape != (3,):
raise ValueError('RPA should have the shape (3,) instead of %s'
% (rpa.shape,))
dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': coord_frame})
if hpi is not None:
hpi = np.asarray(hpi)
if hpi.ndim != 2 or hpi.shape[1] != 3:
raise ValueError('HPI should have the shape (n_points, 3) instead '
'of %s' % (hpi.shape,))
for idx, point in enumerate(hpi):
dig.append({'r': point, 'ident': idx + 1,
'kind': FIFF.FIFFV_POINT_HPI,
'coord_frame': coord_frame})
if extra_points is not None:
extra_points = np.asarray(extra_points)
if len(extra_points) and extra_points.shape[1] != 3:
raise ValueError('Points should have the shape (n_points, 3) '
'instead of %s' % (extra_points.shape,))
for idx, point in enumerate(extra_points):
dig.append({'r': point, 'ident': idx + 1,
'kind': FIFF.FIFFV_POINT_EXTRA,
'coord_frame': coord_frame})
if dig_ch_pos is not None:
try: # use the last 3 as int if possible (e.g., EEG001->1)
idents = []
for key, value in dig_ch_pos.items():
_validate_type(key, str, 'dig_ch_pos')
_validate_type(value, (np.ndarray, list, tuple), 'dig_ch_pos')
if isinstance(value, (list, tuple)):
value = np.array(value)
dig_ch_pos[key] = value
if value.dtype == int or value.dtype == np.float32:
value = value.astype(np.float64)
dig_ch_pos[key] = value
if value.shape != (3, ):
raise RuntimeError("The position should be a 1D array of "
"floats [x, y, z].")
idents.append(int(key[-3:]))
except ValueError: # and if any conversion fails, simply use arange
idents = np.arange(1, len(dig_ch_pos) + 1)
for key, ident in zip(dig_ch_pos, idents):
dig.append({'r': dig_ch_pos[key], 'ident': int(ident),
'kind': FIFF.FIFFV_POINT_EEG,
'coord_frame': coord_frame})
return _format_dig_points(dig)
|
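Both versions validate each fiducial the same way: coerce the input to a NumPy array, check the (3,) shape, and promote integer or float32 data to float64 (the modified version only drops the redundant dtype check from the error branch). The validation step in isolation, as a standalone sketch:
import numpy as np

def validate_point(value, name="point"):
    # Accept list/tuple/ndarray, require shape (3,), return a float64 array.
    value = np.asarray(value)
    if value.shape != (3,):
        raise ValueError('%s should have the shape (3,) instead of %s'
                         % (name, value.shape))
    return value.astype(np.float64)

print(validate_point([1, 2, 3]))   # [1. 2. 3.]
# validate_point([[1, 2, 3]])      # raises ValueError: shape is (1, 3)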
31,813 | def item_purchase_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, None]:
try:
item_id = str(args.get('item_id'))
bot_id = ''
room_id = ''
# Get mentions list:
mentions_list_res = client.get_mention_list()
if isinstance(mentions_list_res, dict) and isinstance(mentions_list_res.get('data', ''), list) \
and len(mentions_list_res['data']) > 0 and isinstance(mentions_list_res['data'][0], dict) \
and 'id' in mentions_list_res['data'][0] and 'alias' in mentions_list_res['data'][0]:
mentions_list = mentions_list_res['data']
# Fetch some important item data.
item_res = client.get_item(item_id=item_id)
if isinstance(item_res, dict) and isinstance(item_res.get('data', ''), dict):
# Prevent execution on unsupported sub types.
if item_res['data'].get('sub_type', -1) not in SUPPORTED_SUB_TYPES_FOR_PURCHASE:
raise Exception("Sub type not supported for purchasing!")
# Extract bot ID and incident ID.
incident_id = item_res['data'].get('feed_property_id', '')
if not incident_id:
raise Exception("Item ID doesn't found!")
bot_id = item_res['data'].get('bot_id', '')
if not bot_id:
raise Exception("Bot ID doesn't found!")
# Check if chat room already exists.
incident_res = client.incident_get_items(incident_id=incident_id)
if isinstance(incident_res, dict) and isinstance(incident_res.get('data', ''), dict) and \
isinstance(incident_res['data'].get('chat', ''), dict) and \
isinstance(incident_res['data']['chat'].get('room', ''), dict):
room_id = incident_res['data']['chat']['room'].get('id', '')
# Send the action status.
action_res = client.action_on_item(item_id=item_id, action="request")
if isinstance(action_res, dict) and isinstance(action_res.get('data', ''), dict) \
and action_res['data'].get('value', ''):
# Send the chat request.
message = {
"text": "Hi <b>@KELA</b> , I would like to acquire further details about bot: " + bot_id,
"mentionsList": mentions_list
}
room = {"itemId": incident_id, "itemType": "FEED_PROPERTY"}
if room_id:
room['id'] = room_id
# Send message.
message_res = client.message_on_incident(message=message, room=room)
if isinstance(message_res, dict) and isinstance(message_res.get('data', ''), dict) \
and message_res['data'].get('roomId', ''):
# readable_output = 'Item marked for purchasing'
readable_output = 'Bot ID (' + bot_id + ') marked for purchasing'
else:
raise Exception("Action failed!")
else:
raise Exception("Action failed!")
else:
readable_output = f'No data found for item ID: {incident_id}'
else:
raise Exception("Mentions list doesn't found!")
return CommandResults(readable_output=readable_output)
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute command on {item_id}.\nError:\n {str(e)}')
return None
| def item_purchase_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, None]:
try:
item_id = args.get('item_id')
bot_id = ''
room_id = ''
# Get mentions list:
mentions_list_res = client.get_mention_list()
if isinstance(mentions_list_res, dict) and isinstance(mentions_list_res.get('data', ''), list) \
and len(mentions_list_res['data']) > 0 and isinstance(mentions_list_res['data'][0], dict) \
and 'id' in mentions_list_res['data'][0] and 'alias' in mentions_list_res['data'][0]:
mentions_list = mentions_list_res['data']
# Fetch some important item data.
item_res = client.get_item(item_id=item_id)
if isinstance(item_res, dict) and isinstance(item_res.get('data', ''), dict):
# Prevent execution on unsupported sub types.
if item_res['data'].get('sub_type', -1) not in SUPPORTED_SUB_TYPES_FOR_PURCHASE:
raise Exception("Sub type not supported for purchasing!")
# Extract bot ID and incident ID.
incident_id = item_res['data'].get('feed_property_id', '')
if not incident_id:
raise Exception("Item ID doesn't found!")
bot_id = item_res['data'].get('bot_id', '')
if not bot_id:
raise Exception("Bot ID doesn't found!")
# Check if chat room already exists.
incident_res = client.incident_get_items(incident_id=incident_id)
if isinstance(incident_res, dict) and isinstance(incident_res.get('data', ''), dict) and \
isinstance(incident_res['data'].get('chat', ''), dict) and \
isinstance(incident_res['data']['chat'].get('room', ''), dict):
room_id = incident_res['data']['chat']['room'].get('id', '')
# Send the action status.
action_res = client.action_on_item(item_id=item_id, action="request")
if isinstance(action_res, dict) and isinstance(action_res.get('data', ''), dict) \
and action_res['data'].get('value', ''):
# Send the chat request.
message = {
"text": "Hi <b>@KELA</b> , I would like to acquire further details about bot: " + bot_id,
"mentionsList": mentions_list
}
room = {"itemId": incident_id, "itemType": "FEED_PROPERTY"}
if room_id:
room['id'] = room_id
# Send message.
message_res = client.message_on_incident(message=message, room=room)
if isinstance(message_res, dict) and isinstance(message_res.get('data', ''), dict) \
and message_res['data'].get('roomId', ''):
# readable_output = 'Item marked for purchasing'
readable_output = 'Bot ID (' + bot_id + ') marked for purchasing'
else:
raise Exception("Action failed!")
else:
raise Exception("Action failed!")
else:
readable_output = f'No data found for item ID: {incident_id}'
else:
raise Exception("Mentions list doesn't found!")
return CommandResults(readable_output=readable_output)
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute command on {item_id}.\nError:\n {str(e)}')
return None
|
54,294 | def tmu(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`t_{\mu}`, for establishing an two-sided
intervals on the strength parameter, :math:`\mu`.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
        Float: The calculated test statistic, :math:`t_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'tmu test statistic used for fit configuration with POI bounded at zero. Use qmutilde.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
| def tmu(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`t_{\mu}`, for establishing a two-sided
interval on the strength parameter, :math:`\mu`.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
        Float: The calculated test statistic, :math:`t_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'tmu test statistic used for fit configuration with POI bounded at zero. Use qmutilde.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
|
6,931 | def get_file_path(module, dt, dn):
dt, dn = scrub_dt_dn(dt, dn)
path = os.path.join(get_module_path(module), os.path.join(dt, dn, dn + ".json"))
return path
| def get_file_path(module, dt, dn):
dt, dn = scrub_dt_dn(dt, dn)
path = os.path.join(get_module_path(module), os.path.join(dt, dn, f"{dn}.json"))
return path
|
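The only change in this pair is replacing string concatenation with an f-string when building the file name; the two spellings produce identical paths (the path components below are illustrative):
import os

dn = "Sales Invoice"
assert dn + ".json" == f"{dn}.json"
print(os.path.join("module_path", "doctype", dn, f"{dn}.json"))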
34,935 | def build_and_run(mod, inputs, outputs, params, device, enable_acl=True, no_runs=1,
tvm_ops=0, acl_partitions=1, config=None):
"""Build and run the relay module."""
if not config:
config = {}
try:
lib = build_module(mod, device.target, params, enable_acl, tvm_ops, acl_partitions)
except Exception as e:
err_msg = "The module could not be built.\n"
if config:
err_msg += f"The test failed with the following parameters: {config}\n"
err_msg += str(e)
raise Exception(err_msg)
lib = update_lib(lib, device.device, device.cross_compile)
gen_module = graph_runtime.GraphModule(lib['default'](device.device.cpu(0)))
gen_module.set_input(**inputs)
out = []
for _ in range(no_runs):
gen_module.run()
out.append([gen_module.get_output(i) for i in range(outputs)])
return out
| def build_and_run(mod, inputs, outputs, params, device, enable_acl=True, no_runs=1,
tvm_ops=0, acl_partitions=1, config=None):
"""Build and run the relay module."""
if config is None:
config = {}
try:
lib = build_module(mod, device.target, params, enable_acl, tvm_ops, acl_partitions)
except Exception as e:
err_msg = "The module could not be built.\n"
if config:
err_msg += f"The test failed with the following parameters: {config}\n"
err_msg += str(e)
raise Exception(err_msg)
lib = update_lib(lib, device.device, device.cross_compile)
gen_module = graph_runtime.GraphModule(lib['default'](device.device.cpu(0)))
gen_module.set_input(**inputs)
out = []
for _ in range(no_runs):
gen_module.run()
out.append([gen_module.get_output(i) for i in range(outputs)])
return out
|
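The fix in this pair replaces a truthiness test on the default argument with an explicit None check. In this particular function both spellings end up with an empty dict, but the explicit form is the safer general pattern because a deliberately passed empty (falsy) value is not silently replaced; the difference in isolation:
def run_truthy(config=None):
    if not config:         # also fires for an explicit empty dict
        config = {"default": True}
    return config

def run_explicit(config=None):
    if config is None:     # only fires when nothing was passed
        config = {"default": True}
    return config

print(run_truthy({}))      # {'default': True} -- caller's empty dict is replaced
print(run_explicit({}))    # {}                -- caller's empty dict is kept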
53,942 | def _get_score_names(results, *, kind="test"):
key_parts = [key.rsplit("_", 1) for key in results.keys()]
prefix = "mean_%s" % kind
return {
parts[1]
for parts in key_parts
if len(parts) == 2 and parts[0] == prefix
}
| def _get_score_names(cv_results, *, kind="test"):
prefix = f"mean_{kind}"
return {key[len(prefix):]
for key in cv_results.keys()
if key.startswith(prefix)}
|
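The rewrite filters keys by prefix and slices the prefix off instead of splitting on the last underscore. Note that the trailing underscore has to be part of the prefix for the returned names to match the original behaviour (no leading "_"); a quick standalone check:
cv_results = {
    "mean_test_accuracy": [0.9],
    "std_test_accuracy": [0.01],
    "mean_test_neg_log_loss": [-0.3],
    "mean_fit_time": [0.2],
}
prefix = "mean_test_"   # include the trailing underscore
print({key[len(prefix):] for key in cv_results if key.startswith(prefix)})
# {'accuracy', 'neg_log_loss'} (set order may vary)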
44,812 | def test_log_behaivor_when_patch_fn_raise_error_before_original_called():
class Test1:
def good_fn(self, x):
pass
def bad_fn(self, x):
raise RuntimeError("bad function called")
def patched_fn(original, self, *args, **kwargs):
raise RuntimeError("patch function error")
flavor_name = "test_log_behaivor_when_patch_fn_raise_error_before_original_called"
@autologging_integration(flavor_name)
def autolog(disable=False, exclusive=False, silent=False): # pylint: disable=unused-argument
safe_patch(flavor_name, Test1, "good_fn", patched_fn, manage_run=False)
safe_patch(flavor_name, Test1, "bad_fn", patched_fn, manage_run=False)
autolog()
with mock.patch(
"mlflow.utils.autologging_utils.events.AutologgingEventLogger.log_patch_function_error"
) as mock_log_patch_function_error, mock.patch(
"mlflow.utils.autologging_utils.events.AutologgingEventLogger.log_original_function_success"
) as mock_log_original_function_success, mock.patch(
"mlflow.utils.autologging_utils.events.AutologgingEventLogger.log_original_function_error"
) as mock_log_original_function_error:
t1 = Test1()
t1.good_fn(1)
mock_log_patch_function_error.assert_called_once()
mock_log_original_function_success.assert_called_once()
mock_log_original_function_error.assert_not_called()
mock_log_patch_function_error.reset_mock()
mock_log_original_function_success.reset_mock()
mock_log_original_function_error.reset_mock()
with pytest.raises(RuntimeError, match="bad function called"):
t1.bad_fn(1)
mock_log_patch_function_error.assert_not_called()
mock_log_original_function_success.assert_not_called()
mock_log_original_function_error.assert_called_once()
| def test_log_behavior_when_patch_fn_raise_error_before_original_called():
class Test1:
def good_fn(self, x):
pass
def bad_fn(self, x):
raise RuntimeError("bad function called")
def patched_fn(original, self, *args, **kwargs):
raise RuntimeError("patch function error")
flavor_name = "test_log_behaivor_when_patch_fn_raise_error_before_original_called"
@autologging_integration(flavor_name)
def autolog(disable=False, exclusive=False, silent=False): # pylint: disable=unused-argument
safe_patch(flavor_name, Test1, "good_fn", patched_fn, manage_run=False)
safe_patch(flavor_name, Test1, "bad_fn", patched_fn, manage_run=False)
autolog()
with mock.patch(
"mlflow.utils.autologging_utils.events.AutologgingEventLogger.log_patch_function_error"
) as mock_log_patch_function_error, mock.patch(
"mlflow.utils.autologging_utils.events.AutologgingEventLogger.log_original_function_success"
) as mock_log_original_function_success, mock.patch(
"mlflow.utils.autologging_utils.events.AutologgingEventLogger.log_original_function_error"
) as mock_log_original_function_error:
t1 = Test1()
t1.good_fn(1)
mock_log_patch_function_error.assert_called_once()
mock_log_original_function_success.assert_called_once()
mock_log_original_function_error.assert_not_called()
mock_log_patch_function_error.reset_mock()
mock_log_original_function_success.reset_mock()
mock_log_original_function_error.reset_mock()
with pytest.raises(RuntimeError, match="bad function called"):
t1.bad_fn(1)
mock_log_patch_function_error.assert_not_called()
mock_log_original_function_success.assert_not_called()
mock_log_original_function_error.assert_called_once()
|
32,161 | def merge_version_blocks(pack_versions_dict: dict) -> Tuple[str, str]:
"""
merge several pack release note versions into a single block.
Args:
pack_versions_dict: a mapping from a pack version to a release notes file content.
Returns:
str: a single pack release note block
str: the pack's latest version
"""
latest_version = '1.0.0'
entities_data: dict = {}
for pack_version, version_release_notes in sorted(pack_versions_dict.items(),
key=lambda pack_item: Version(pack_item[0])):
latest_version = pack_version
version_release_notes = version_release_notes.strip()
# extract release notes sections by content types (all playbooks, all scripts, etc...)
# assuming all entity titles start with level 4 header ("####") and then a list of all comments
sections = ENTITY_TYPE_SECTION_REGEX.findall(version_release_notes)
for section in sections:
# one of scripts, playbooks, integrations, layouts, incident fields, etc...
entity_type = section[0] or section[2]
# blocks of entity name and related release notes comments
entity_section = section[1] or section[3]
entities_data.setdefault(entity_type, {})
if not entity_section.strip().startswith('#####'):
entity_section = "##### special_msg\n" + entity_section
# extract release notes comments by entity
# assuming all entity titles start with level 5 header ("#####") and then a list of all comments
entity_comments = ENTITY_SECTION_REGEX.findall(entity_section)
for entity in entity_comments:
# name of the script, integration, playbook, etc...
entity_name = entity[0] or entity[2] or entity[4]
entity_name = entity_name.replace('__', '')
# release notes of the entity
entity_comment = entity[1] or entity[3] or entity[5]
if entity_name in entities_data[entity_type]:
entities_data[entity_type][entity_name] += f'{entity_comment.strip()}\n'
else:
entities_data[entity_type][entity_name] = f'{entity_comment.strip()}\n'
pack_release_notes = construct_entities_block(entities_data).strip()
return pack_release_notes, latest_version
| def merge_version_blocks(pack_versions_dict: dict) -> Tuple[str, str]:
"""
merge several pack release note versions into a single block.
Args:
pack_versions_dict: a mapping from a pack version to a release notes file content.
Returns:
str: a single pack release note block
str: the pack's latest version
"""
latest_version = '1.0.0'
entities_data: dict = {}
for pack_version, version_release_notes in sorted(pack_versions_dict.items(),
key=lambda pack_item: Version(pack_item[0])):
latest_version = pack_version
version_release_notes = version_release_notes.strip()
# extract release notes sections by content types (all playbooks, all scripts, etc...)
# assuming all entity titles start with level 4 header ("####") and then a list of all comments
sections = ENTITY_TYPE_SECTION_REGEX.findall(version_release_notes)
for section in sections:
# one of scripts, playbooks, integrations, layouts, incident fields, etc...
entity_type = section[0] or section[2]
# blocks of entity name and related release notes comments
entity_section = section[1] or section[3]
entities_data.setdefault(entity_type, {})
if not entity_section.strip().startswith('#####'):
entity_section = "##### [special_msg]\n" + entity_section
# extract release notes comments by entity
# assuming all entity titles start with level 5 header ("#####") and then a list of all comments
entity_comments = ENTITY_SECTION_REGEX.findall(entity_section)
for entity in entity_comments:
# name of the script, integration, playbook, etc...
entity_name = entity[0] or entity[2] or entity[4]
entity_name = entity_name.replace('__', '')
# release notes of the entity
entity_comment = entity[1] or entity[3] or entity[5]
if entity_name in entities_data[entity_type]:
entities_data[entity_type][entity_name] += f'{entity_comment.strip()}\n'
else:
entities_data[entity_type][entity_name] = f'{entity_comment.strip()}\n'
pack_release_notes = construct_entities_block(entities_data).strip()
return pack_release_notes, latest_version
|
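Both versions sort the per-version release-note blocks with Version as the sort key, which orders releases numerically rather than lexicographically (assuming Version here is packaging.version.Version):
from packaging.version import Version

versions = ["1.10.0", "1.2.0", "1.9.1"]
print(sorted(versions))               # ['1.10.0', '1.2.0', '1.9.1'] -- plain string order
print(sorted(versions, key=Version))  # ['1.2.0', '1.9.1', '1.10.0'] -- semantic order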
12,362 | def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
    @param interface: Name of the network interface on which to dhclient.
@param cleandir: The directory from which to run dhclient as well as store
dhcp leases.
    @return: A list of dicts representing the dhcp leases parsed from the
dhcp.leases file or empty list.
"""
LOG.debug('Performing a dhcp discovery on %s', interface)
# XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
# app armor profiles which disallow running dhclient -sf <our-script-file>.
# We want to avoid running /sbin/dhclient-script because of side-effects in
    # /etc/resolv.conf and any other vendor specific scripts in
# /etc/dhcp/dhclient*hooks.d.
sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
pid_file = os.path.join(cleandir, 'dhclient.pid')
lease_file = os.path.join(cleandir, 'dhcp.leases')
# ISC dhclient needs the interface up to send initial discovery packets.
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
'-pf', pid_file, interface, '-sf', '/bin/true']
pout = util.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
# named by the pid file to daemonize (have pid 1 as its parent). If we
# try to read the lease file before daemonization happens, we might try
# to read it before the dhclient has actually written it. We also have
# to wait until the dhclient has become a daemon so we can be sure to
# kill the correct process, thus freeing cleandir to be deleted back
# up the callstack.
missing = util.wait_for_files(
[pid_file, lease_file], maxwait=5, naplen=0.01)
if missing:
LOG.warning("dhclient did not produce expected files: %s",
', '.join(os.path.basename(f) for f in missing))
return []
ppid = 'unknown'
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
try:
pid = int(pid_content)
except ValueError:
pass
else:
ppid = util.get_proc_ppid(pid)
if ppid == 1:
if len(pout) == 2:
LOG.debug('dhclient error stream: %s', pout[1])
LOG.debug('killing dhclient with pid=%s', pid)
os.kill(pid, signal.SIGKILL)
return parse_dhcp_lease_file(lease_file)
time.sleep(0.01)
LOG.error(
'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
pid_content, ppid, 0.01 * 1000
)
return parse_dhcp_lease_file(lease_file)
| def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
@param interface: Name of the network interface on which to dhclient.
@param cleandir: The directory from which to run dhclient as well as store
dhcp leases.
@return: A list of dicts representing the dhcp leases parsed from the
dhcp.leases file or empty list.
"""
LOG.debug('Performing a dhcp discovery on %s', interface)
# XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
# app armor profiles which disallow running dhclient -sf <our-script-file>.
# We want to avoid running /sbin/dhclient-script because of side-effects in
# /etc/resolv.conf and any other vendor specific scripts in
# /etc/dhcp/dhclient*hooks.d.
sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
pid_file = os.path.join(cleandir, 'dhclient.pid')
lease_file = os.path.join(cleandir, 'dhcp.leases')
# ISC dhclient needs the interface up to send initial discovery packets.
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
'-pf', pid_file, interface, '-sf', '/bin/true']
_out, err = util.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
# named by the pid file to daemonize (have pid 1 as its parent). If we
# try to read the lease file before daemonization happens, we might try
# to read it before the dhclient has actually written it. We also have
# to wait until the dhclient has become a daemon so we can be sure to
# kill the correct process, thus freeing cleandir to be deleted back
# up the callstack.
missing = util.wait_for_files(
[pid_file, lease_file], maxwait=5, naplen=0.01)
if missing:
LOG.warning("dhclient did not produce expected files: %s",
', '.join(os.path.basename(f) for f in missing))
return []
ppid = 'unknown'
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
try:
pid = int(pid_content)
except ValueError:
pass
else:
ppid = util.get_proc_ppid(pid)
if ppid == 1:
if err:
    LOG.debug('dhclient error stream: %s', err)
LOG.debug('killing dhclient with pid=%s', pid)
os.kill(pid, signal.SIGKILL)
return parse_dhcp_lease_file(lease_file)
time.sleep(0.01)
LOG.error(
'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
pid_content, ppid, 0.01 * 1000
)
return parse_dhcp_lease_file(lease_file)
|
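The daemonize wait in dhcp_discovery hinges on the child being re-parented to pid 1 once dhclient forks. util.get_proc_ppid is a cloud-init helper; a rough, Linux-only sketch of the same check that reads /proc/<pid>/stat directly (illustrative only, not the cloud-init implementation):

import os

def get_ppid(pid):
    """Return the parent pid of `pid` by parsing /proc/<pid>/stat (Linux only)."""
    with open('/proc/%d/stat' % pid) as f:
        stat = f.read()
    # The command name is wrapped in parentheses and may contain spaces, so
    # split on the final ')' before reading the remaining space-separated fields.
    fields = stat.rsplit(')', 1)[1].split()
    # fields[0] is the process state, fields[1] is the ppid.
    return int(fields[1])

# A daemonized process has been re-parented to init/systemd, i.e. ppid == 1.
print(get_ppid(os.getpid()))  # prints the parent of this script, normally not 1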
40,733 | def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer: trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler: Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save: dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.Checkpoint` instance.
save_every_iters: saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path: output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler: learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats: if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names: list of names associated with `update_function` output dictionary.
with_pbars: if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters: logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan: if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler: Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
"""
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(
trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(
trainer,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
| def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer: trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler: Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save: dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.Checkpoint` instance.
save_every_iters: saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path: output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler: learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats: if True, :class:`~ignite.contrib.metrics.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names: list of names associated with `update_function` output dictionary.
with_pbars: if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters: logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan: if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler: Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
"""
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(
trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(
trainer,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
|
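For context, a typical single-process call of setup_common_training_handlers looks roughly like the sketch below. The import path and keyword usage follow the signature shown above but are not taken from this document, so treat them as assumptions rather than the canonical ignite example:

import torch
from ignite.engine import Engine
from ignite.contrib.engines.common import setup_common_training_handlers  # path assumed

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

def train_step(engine, batch):
    x, y = batch
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(x), y)
    loss.backward()
    optimizer.step()
    return {"loss": loss.item()}  # picked up through output_names below

trainer = Engine(train_step)
# Attaches TerminateOnNan, periodic checkpointing and the progress bars.
setup_common_training_handlers(
    trainer,
    to_save={"model": model, "optimizer": optimizer},
    save_every_iters=500,
    output_path="/tmp/checkpoints",
    output_names=["loss"],
)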
30,700 | def test_module(client: Client) -> str:
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client: HelloWorld client
Returns:
'ok' if test passed, anything else will fail the test.
"""
try:
result = client.dehashed_search(
asset_type="vin", value=["test", "test1"], operation="is"
)
except Exception as e:
raise DemistoException(
f"Test failed. please check if Server Url, Email or Api key are correct. \n {e}"
)
else:
if isinstance(result, dict):
return "ok"
else:
return f"Test failed because got unexpected response from api: {result}"
| def test_module(client: Client) -> str:
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client: DeHashed client
Returns:
'ok' if test passed, anything else will fail the test.
"""
try:
result = client.dehashed_search(
asset_type="vin", value=["test", "test1"], operation="is"
)
except Exception as e:
raise DemistoException(
f"Test failed. please check if Server Url, Email or Api key are correct. \n {e}"
)
else:
if isinstance(result, dict):
return "ok"
else:
return f"Test failed because got unexpected response from api: {result}"
|
56,268 | def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-v", "--vocab", help="Required. Path to the vocabulary file with tokens",
required=True, type=str)
args.add_argument("-m", "--model", required=True,
help="Required. Path to an .xml file with a trained model "
"or address of model inference service if using OVMS adapter.")
args.add_argument("-i", "--input", help="Required. URL to a page with context",
action='append',
required=True, type=str)
args.add_argument('--adapter', help='Optional. Specify the model adapter. Default is openvino.',
default='openvino', type=str, choices=('openvino', 'ovms'))
args.add_argument("--input_names",
help="Optional. Inputs names for the network. "
"Default values are \"input_ids,attention_mask,token_type_ids\" ",
required=False, type=str, default="input_ids,attention_mask,token_type_ids")
args.add_argument('--layout',
help='Optional. Model inputs layouts. '
'Ex. NCHW or input0:NCHW,input1:NC in case of more than one input.',
type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Target device to perform inference on."
"Default value is CPU", default="CPU", type=str)
args.add_argument('-nireq', '--num_infer_requests', help='Optional. Number of infer requests.',
default=0, type=int)
args.add_argument('-nstreams', '--num_streams',
help='Optional. Number of streams to use for inference on the CPU or/and GPU in throughput '
'mode (for HETERO and MULTI device cases use format '
'<device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>).',
default='', type=str)
args.add_argument('-nthreads', '--num_threads', default=None, type=int,
help='Optional. Number of threads to use for inference on CPU (including HETERO cases).')
args.add_argument('--dynamic_shape', action='store_true', help='Run model with dynamic input sequence. If not provided, input sequence will be padded to max_seq_len')
return parser
| def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-v", "--vocab", help="Required. Path to the vocabulary file with tokens",
required=True, type=str)
args.add_argument("-m", "--model", required=True,
help="Required. Path to an .xml file with a trained model "
"or address of model inference service if using OVMS adapter.")
args.add_argument("-i", "--input", help="Required. URL to a page with context",
action='append',
required=True, type=str)
args.add_argument('--adapter', help='Optional. Specify the model adapter. Default is openvino.',
default='openvino', type=str, choices=('openvino', 'ovms'))
args.add_argument("--input_names",
help="Optional. Inputs names for the network. "
"Default values are \"input_ids,attention_mask,token_type_ids\" ",
required=False, type=str, default="input_ids,attention_mask,token_type_ids")
args.add_argument('--layout',
help='Optional. Model inputs layouts. '
'Ex. NCHW or input0:NCHW,input1:NC in case of more than one input.',
type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Target device to perform inference on."
"Default value is CPU", default="CPU", type=str)
args.add_argument('-nireq', '--num_infer_requests', help='Optional. Number of infer requests.',
default=0, type=int)
args.add_argument('-nstreams', '--num_streams',
help='Optional. Number of streams to use for inference on the CPU or/and GPU in throughput '
'mode (for HETERO and MULTI device cases use format '
'<device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>).',
default='', type=str)
args.add_argument('-nthreads', '--num_threads', default=None, type=int,
help='Optional. Number of threads to use for inference on CPU (including HETERO cases).')
args.add_argument('--dynamic_shape', action='store_true',
help='Optional. Run model with dynamic input sequence. If not provided, input sequence will be padded to max_seq_len')
return parser
|
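The --layout option above accepts either a single layout or a comma-separated, name-qualified form such as input0:NCHW,input1:NC. A small, purely illustrative parser for that convention (the demos ship their own handling, so this helper is an assumption, not the demo code):

def parse_layouts(layout_arg):
    """Parse 'NCHW' into a string, or 'input0:NCHW,input1:NC' into a dict."""
    if not layout_arg or ':' not in layout_arg:
        return layout_arg
    layouts = {}
    for item in layout_arg.split(','):
        name, layout = item.split(':', 1)
        layouts[name] = layout
    return layouts

print(parse_layouts('NCHW'))                   # NCHW
print(parse_layouts('input0:NCHW,input1:NC'))  # {'input0': 'NCHW', 'input1': 'NC'}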
41,682 | def download_and_extract(
buildpath: Path, packagedir: Path, pkg: Dict[str, Any], args
) -> Path:
srcpath = buildpath / packagedir
if "source" not in pkg:
return srcpath
if "url" in pkg["source"]:
response = request.urlopen(pkg["source"]["url"])
_, parameters = cgi.parse_header(
response.headers.get("Content-Disposition", "")
)
if "filename" in parameters:
tarballname = parameters["filename"]
else:
tarballname = Path(response.geturl()).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
try:
os.makedirs(os.path.dirname(tarballpath), exist_ok=True)
with open(tarballpath, "wb") as f:
f.write(response.read())
check_checksum(tarballpath, pkg)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
for extension in [
".tar.gz",
".tgz",
".tar",
".tar.bz2",
".tbz2",
".tar.xz",
".txz",
".zip",
]:
if tarballname.endswith(extension):
tarballname = tarballname[: -len(extension)]
break
return buildpath / pkg["source"].get("extract_dir", tarballname)
elif "path" in pkg["source"]:
srcdir = Path(pkg["source"]["path"])
if not srcdir.is_dir():
raise ValueError("'path' must point to a directory that exists")
if not srcpath.is_dir():
shutil.copytree(srcdir, srcpath)
return srcpath
else:
raise ValueError("Incorrect source provided")
| def download_and_extract(
buildpath: Path, packagedir: Path, pkg: Dict[str, Any], args
) -> Path:
srcpath = buildpath / packagedir
if "source" not in pkg:
return srcpath
if "url" in pkg["source"]:
response = request.urlopen(pkg["source"]["url"])
_, parameters = cgi.parse_header(
response.headers.get("Content-Disposition", "")
)
if "filename" in parameters:
tarballname = parameters["filename"]
else:
tarballname = Path(response.geturl()).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
try:
os.makedirs(os.path.dirname(tarballpath), exist_ok=True)
with open(tarballpath, "wb") as f:
f.write(response.read())
check_checksum(tarballpath, pkg)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
for extension in [
".tar.gz",
".tgz",
".tar",
".tar.bz2",
".tbz2",
".tar.xz",
".txz",
".zip",
]:
if tarballname.endswith(extension):
tarballname = tarballname[: -len(extension)]
break
return buildpath / pkg["source"].get("extract_dir", tarballname)
elif "path" in pkg["source"]:
srcdir = Path(pkg["source"]["path"])
if not srcdir.is_dir():
raise ValueError(f"path={srcdir} must point to a directory that exists")
if not srcpath.is_dir():
shutil.copytree(srcdir, srcpath)
return srcpath
else:
raise ValueError("Incorrect source provided")
|
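The filename detection in download_and_extract relies on cgi.parse_header to pick the filename parameter out of a Content-Disposition header, falling back to the last URL segment when it is absent. A quick standalone illustration (note the cgi module is deprecated as of Python 3.11; the header value here is made up):

import cgi

value, params = cgi.parse_header('attachment; filename="numpy-1.22.0.tar.gz"')
print(value)                 # attachment
print(params['filename'])    # numpy-1.22.0.tar.gz

# With no Content-Disposition header the parameters dict stays empty,
# which is when the code falls back to Path(response.geturl()).name.
value, params = cgi.parse_header('')
print('filename' in params)  # False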
58,695 | def get_test_stories(
processor: "MessageProcessor",
conversation_id: Text,
until_time: Optional[float],
fetch_all_sessions: bool = False,
) -> Text:
"""Retrieves test stories from `processor` for all conversation sessions for
`conversation_id`.
Args:
processor: An instance of `MessageProcessor`.
conversation_id: Conversation ID to fetch stories for.
until_time: Timestamp up to which to include events.
fetch_all_sessions: Whether to fetch stories for all conversation sessions.
If `False`, only the last conversation session is retrieved.
Returns:
The stories for `conversation_id` in test format.
"""
if fetch_all_sessions:
trackers: List[
DialogueStateTracker
] = processor.get_trackers_for_all_conversation_sessions(conversation_id)
else:
trackers = [processor.get_tracker(conversation_id)]
if until_time is not None:
trackers = [tracker.travel_back_in_time(until_time) for tracker in trackers]
# keep only non-empty trackers
trackers = [tracker for tracker in trackers if len(tracker.events)]
logger.debug(
f"Fetched trackers for {len(trackers)} conversation sessions "
f"for conversation ID {conversation_id}."
)
story_steps = []
more_than_one_story = len(trackers) > 1
for i, tracker in enumerate(trackers, 1):
for story_step in tracker.as_story().story_steps:
story_step.block_name = conversation_id
if more_than_one_story:
story_step.block_name += f", story {i}"
story_steps.append(story_step)
return YAMLStoryWriter().dumps(story_steps, is_test_story=True)
| def get_test_stories(
processor: "MessageProcessor",
conversation_id: Text,
until_time: Optional[float],
fetch_all_sessions: bool = False,
) -> Text:
"""Retrieves test stories from `processor` for all conversation sessions for
`conversation_id`.
Args:
processor: An instance of `MessageProcessor`.
conversation_id: Conversation ID to fetch stories for.
until_time: Timestamp up to which to include events.
fetch_all_sessions: Whether to fetch stories for all conversation sessions.
If `False`, only the last conversation session is retrieved.
Returns:
The stories for `conversation_id` in test format.
"""
if fetch_all_sessions:
trackers: List[
DialogueStateTracker
] = processor.get_trackers_for_all_conversation_sessions(conversation_id)
else:
trackers = [processor.get_tracker(conversation_id)]
if until_time is not None:
trackers = [tracker.travel_back_in_time(until_time) for tracker in trackers]
# keep only non-empty trackers
trackers = [tracker for tracker in trackers if len(tracker.events)]
logger.debug(
f"Fetched trackers for {len(trackers)} conversation sessions "
f"for conversation ID {conversation_id}."
)
story_steps = []
more_than_one_story = len(trackers) > 1
for i, tracker in enumerate(trackers, 1):
if more_than_one_story:
tracker.sender_id = f"{tracker.sender_id}, story {i}"
story_steps += tracker.as_story().story_steps
return YAMLStoryWriter().dumps(story_steps, is_test_story=True)
|
49,154 | def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False, use_l10n=None):
"""
Get a number (as a number or string), and return it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator.
For non-uniform digit grouping, it can be a sequence with the number
of digit group sizes following the format used by the Python locale
module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = (use_l10n or (use_l10n is None and settings.USE_L10N)) and settings.USE_THOUSAND_SEPARATOR
use_grouping = use_grouping or force_grouping
use_grouping = use_grouping and grouping != 0
# Make the common case fast
if isinstance(number, int) and (not use_grouping or grouping == 3):
if not use_grouping:
number = str(number)
elif number >= 1000 or number <= -1000:
number = builtins.format(number, ',')
if not thousand_sep == ',':
number = number.replace(',', thousand_sep)
if decimal_pos:
return number + decimal_sep + '0' * decimal_pos
return number
if isinstance(number, Decimal):
if decimal_pos is not None:
# If the provided number is too small to affect any of the visible
# decimal places, consider it equal to '0'.
cutoff = Decimal('0.' + '1'.rjust(decimal_pos, '0'))
if abs(number) < cutoff:
number = Decimal('0')
# Format values with more than 200 digits (an arbitrary cutoff) using
# scientific notation to avoid high memory usage in {:f}'.format().
_, digits, exponent = number.as_tuple()
if abs(exponent) + len(digits) > 200:
number = '{:e}'.format(number)
coefficient, exponent = number.split('e')
# Format the coefficient.
coefficient = format(
coefficient, decimal_sep, decimal_pos, grouping,
thousand_sep, force_grouping, use_l10n,
)
return '{}e{}'.format(coefficient, exponent)
else:
str_number = '{:f}'.format(number)
else:
str_number = str(number)
# sign
sign = ''
if str_number[0] == '-':
sign = '-'
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos is not None:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos is not None:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
dec_part = dec_part and decimal_sep + dec_part
# grouping
if use_grouping:
# Use the builtin format where possible
if grouping == 3:
if len(int_part) > 3:
int_part = builtins.format(int(int_part), ",")
if not thousand_sep == ",":
int_part = int_part.replace(",", thousand_sep)
return sign + int_part + dec_part
try:
# if grouping is a sequence
intervals = list(grouping)
except TypeError:
# grouping is a single value
intervals = [grouping, 0]
active_interval = intervals.pop(0)
int_part_gd = ''
cnt = 0
for digit in reversed(int_part):
if cnt and cnt == active_interval:
if intervals:
active_interval = intervals.pop(0) or active_interval
int_part_gd += thousand_sep[::-1]
cnt = 0
int_part_gd += digit
cnt += 1
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
| def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False, use_l10n=None):
"""
Get a number (as a number or string), and return it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator.
For non-uniform digit grouping, it can be a sequence with the number
of digit group sizes following the format used by the Python locale
module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = (use_l10n or (use_l10n is None and settings.USE_L10N)) and settings.USE_THOUSAND_SEPARATOR
use_grouping = use_grouping or force_grouping
use_grouping = use_grouping and grouping != 0
# Make the common case fast
if isinstance(number, int) and (not use_grouping or grouping == 3):
if not use_grouping:
number = str(number)
elif number >= 1000 or number <= -1000:
# format(int, ',') returns a string with a ',' as a thousand separator, no matter the locale
number = f'{number:-,}'
if not thousand_sep == ',':
number = number.replace(',', thousand_sep)
if decimal_pos:
return number + decimal_sep + '0' * decimal_pos
return number
if isinstance(number, Decimal):
if decimal_pos is not None:
# If the provided number is too small to affect any of the visible
# decimal places, consider it equal to '0'.
cutoff = Decimal('0.' + '1'.rjust(decimal_pos, '0'))
if abs(number) < cutoff:
number = Decimal('0')
# Format values with more than 200 digits (an arbitrary cutoff) using
# scientific notation to avoid high memory usage in {:f}'.format().
_, digits, exponent = number.as_tuple()
if abs(exponent) + len(digits) > 200:
number = '{:e}'.format(number)
coefficient, exponent = number.split('e')
# Format the coefficient.
coefficient = format(
coefficient, decimal_sep, decimal_pos, grouping,
thousand_sep, force_grouping, use_l10n,
)
return '{}e{}'.format(coefficient, exponent)
else:
str_number = '{:f}'.format(number)
else:
str_number = str(number)
# sign
sign = ''
if str_number[0] == '-':
sign = '-'
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos is not None:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos is not None:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
dec_part = dec_part and decimal_sep + dec_part
# grouping
if use_grouping:
# Use the builtin format where possible
if grouping == 3:
if len(int_part) > 3:
int_part = builtins.format(int(int_part), ",")
if not thousand_sep == ",":
int_part = int_part.replace(",", thousand_sep)
return sign + int_part + dec_part
try:
# if grouping is a sequence
intervals = list(grouping)
except TypeError:
# grouping is a single value
intervals = [grouping, 0]
active_interval = intervals.pop(0)
int_part_gd = ''
cnt = 0
for digit in reversed(int_part):
if cnt and cnt == active_interval:
if intervals:
active_interval = intervals.pop(0) or active_interval
int_part_gd += thousand_sep[::-1]
cnt = 0
int_part_gd += digit
cnt += 1
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
|
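To make the non-uniform grouping branch concrete, the reversed-digit walk can be pulled out on its own; with grouping=(3, 2, 0) it produces Indian-style lakh/crore grouping, turning 1234567 into 12,34,567. A standalone sketch of the same loop, for illustration only:

def group_digits(int_part, grouping, thousand_sep):
    """Standalone copy of the grouping loop above."""
    try:
        intervals = list(grouping)      # sequence, e.g. (3, 2, 0)
    except TypeError:
        intervals = [grouping, 0]       # single group size
    active_interval = intervals.pop(0)
    grouped = ""
    cnt = 0
    for digit in reversed(int_part):
        if cnt and cnt == active_interval:
            if intervals:
                active_interval = intervals.pop(0) or active_interval
            grouped += thousand_sep[::-1]
            cnt = 0
        grouped += digit
        cnt += 1
    return grouped[::-1]

print(group_digits("1234567", (3, 2, 0), ","))  # 12,34,567
print(group_digits("1234567", 3, ","))          # 1,234,567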
31,855 | def ignore_ransomware_anomaly_command(client: Client, args: Dict[str, Any]) -> str:
"""Ignore detected anomalous object on Helios.
:type client: ``Client``
:param Client: cohesity helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with ignore anomaly parameters.
:return: success message of the ignore anomaly operation.
:rtype: ``str``
"""
# Filter ransomware alert for given object name.
alert_id = ''
object_name = args.get('object_name')
demisto.debug("Performing ignore anomaly operation for object {name}".format(name=object_name))
resp = client.get_ransomware_alerts()
for alert in resp:
property_dict = _get_property_dict(alert['propertyList'])
if property_dict.get('object', "") == object_name:
alert_id = alert.get('id')
if alert_id == '':
raise ValueError('No anomalous object found by given name')
# Suppress ransomware alert.
client.suppress_ransomware_alert_by_id(alert_id)
return "Ignored object {name}".format(name=object_name)
| def ignore_ransomware_anomaly_command(client: Client, args: Dict[str, Any]) -> str:
"""Ignore detected anomalous object on Helios.
:type client: ``Client``
:param Client: cohesity helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with ignore anomaly parameters.
:return: success message of the ignore anomaly operation.
:rtype: ``str``
"""
# Filter ransomware alert for given object name.
alert_id = ''
object_name = args.get('object_name')
demisto.debug("Performing ignore anomaly operation for object {name}".format(name=object_name))
resp = client.get_ransomware_alerts()
for alert in resp:
property_dict = _get_property_dict(alert['propertyList'])
if property_dict.get('object', "") == object_name:
alert_id = alert.get('id')
if alert_id == '':
raise ValueError(f'CohesityHelios error: no anomalous object found by the given name: {object_name}. ')
# Suppress ransomware alert.
client.suppress_ransomware_alert_by_id(alert_id)
return "Ignored object {name}".format(name=object_name)
|
7,631 | def update_all_packages(
roots, destdir=None, dry_run=False, stable_only=True, blacklist_file=None
):
all_packages = itertools.chain(*[listdir(r) for r in roots])
skip_packages = set()
if blacklist_file:
skip_packages = set(read_lines(blacklist_file))
print(
'Skipping update of blacklisted packages (listed in "{}"): {}'.format(
blacklist_file, ", ".join(sorted(skip_packages))
)
)
packages = frozenset(
[pkg for pkg in all_packages if pkg.pkgname not in skip_packages]
)
update(packages, destdir, dry_run, stable_only)
| def update_all_packages(
roots, destdir=None, dry_run=False, stable_only=True, blacklist_file=None
):
all_packages = itertools.chain.from_iterable(listdir(r) for r in roots)
skip_packages = set()
if blacklist_file:
skip_packages = set(read_lines(blacklist_file))
print(
'Skipping update of blacklisted packages (listed in "{}"): {}'.format(
blacklist_file, ", ".join(sorted(skip_packages))
)
)
packages = frozenset(
[pkg for pkg in all_packages if pkg.pkgname not in skip_packages]
)
update(packages, destdir, dry_run, stable_only)
|
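The only functional change in this pair is swapping itertools.chain(*[...]) for itertools.chain.from_iterable(...). Both flatten the per-root listings into one stream; the starred form materialises the outer sequence of iterables up front, while from_iterable walks it lazily:

import itertools

roots = [["pkg-a", "pkg-b"], ["pkg-c"]]

eager = list(itertools.chain(*roots))              # unpacks the outer list immediately
lazy = list(itertools.chain.from_iterable(roots))  # consumes the outer iterable lazily
assert eager == lazy == ["pkg-a", "pkg-b", "pkg-c"]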
32,318 | def find_last_process_list_script(script_results: Union[list, dict]):
"""
Args:
script_results (List | dict): script results after running XDRIR script
Returns:
list | None: if a proper result was find return the _return_value else None
"""
if not script_results:
return None
if not isinstance(script_results, list):
script_results = [script_results]
for script_result in reversed(script_results):
if not (results := script_result.get('results', [])):
continue
if not isinstance(results, list):
results = [results]
for result in reversed(results):
if not (_return_value := result.get('_return_value', [])):
continue
if detect_process_field(_return_value[0]):
return _return_value
return None
| def find_last_process_list_script(script_results: Union[list, dict]):
"""
Args:
script_results (List | dict): script results after running XDRIR script
Returns:
list | None: if a proper result was found return the _return_value else None
"""
if not script_results:
return None
if not isinstance(script_results, list):
script_results = [script_results]
for script_result in reversed(script_results):
if not (results := script_result.get('results', [])):
continue
if not isinstance(results, list):
results = [results]
for result in reversed(results):
if not (_return_value := result.get('_return_value', [])):
continue
if detect_process_field(_return_value[0]):
return _return_value
return None
|
12,905 | def update_order_prices(order, discounts):
"""Update prices in order with given discounts and proper taxes."""
manager = get_extensions_manager()
for line in order: # type: OrderLine
if line.variant:
unit_price = line.variant.get_price(discounts)
unit_price = TaxedMoney(unit_price, unit_price)
line.unit_price = unit_price
line.save(
update_fields=[
"currency",
"unit_price_net_amount",
"unit_price_gross_amount",
]
)
price = manager.calculate_order_line_unit(line)
if price != line.unit_price:
line.unit_price = price
if price.tax and price.net:
line.tax_rate = price.tax / price.net
line.save()
if order.shipping_method:
order.shipping_price = manager.calculate_order_shipping(order)
order.save(update_fields=["shipping_price"])
recalculate_order(order)
| def update_order_prices(order, discounts):
"""Update prices in order with given discounts and proper taxes."""
manager = get_extensions_manager()
for line in order: # type: OrderLine
if line.variant:
unit_price = line.variant.get_price(discounts)
unit_price = TaxedMoney(unit_price, unit_price)
line.unit_price = unit_price
line.save(
update_fields=[
"currency",
"unit_price_net_amount",
"unit_price_gross_amount",
]
)
price = manager.calculate_order_line_unit(line)
if price != line.unit_price:
line.unit_price = price
if price.tax and price.net:
line.tax_rate = price.tax / price.net
line.save()
if order.shipping_method:
order.shipping_price = manager.calculate_order_shipping(order)
order.save(update_fields=["shipping_price_net_amount", "shipping_price_gross_amount", "currency"])
recalculate_order(order)
|
27,681 | def test_function_item_obj_is_instance(testdir):
"""item.obj should be a bound method on unittest.TestCase function items (#5390)."""
testdir.makeconftest(
"""
def pytest_runtest_makereport(item, call):
if call.when == 'call':
class_ = item.parent.obj
assert isinstance(item.obj.__self__, class_)
"""
)
testdir.makepyfile(
"""
import unittest
class Test(unittest.TestCase):
def test_foo(self):
pass
"""
)
result = testdir.runpytest_inprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
| def test_function_item_obj_is_instance(testdir):
"""item.obj should be a bound method on unittest.TestCase function items (#5390)."""
testdir.makeconftest(
"""
def pytest_runtest_makereport(item, call):
if call.when == 'call':
class_ = item.parent.obj
assert isinstance(item.obj.__self__, class_)
"""
)
testdir.makepyfile(
"""
import unittest
class Test(unittest.TestCase):
def test_foo(self):
pass
"""
)
result = testdir.runpytest_inprocess()
result.stdout.fnmatch_lines(["* 1 passed in*"])
|
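The conftest assertion in the test above works because a bound method carries the instance it was looked up on in its __self__ attribute; a plain-Python illustration of that fact:

class Widget:
    def ping(self):
        return "pong"

w = Widget()
bound = w.ping                      # attribute lookup on the instance binds the method
assert bound.__self__ is w          # the bound instance is reachable via __self__
assert isinstance(bound.__self__, Widget)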
26,358 | def main() -> None:
parser = argparse.ArgumentParser(
description="""Generate baseline stubs automatically for an installed pip package
using stubgen. Also run black and isort. If the name of
the project is different from the runtime Python package name, you may
need to use --package (example: --package yaml PyYAML)."""
)
parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
parser.add_argument("--package", help="generate stubs for this Python package (default is autodetected)")
args = parser.parse_args()
project = args.project
package = args.package
if not re.match(r"[a-zA-Z0-9-_.]+$", project):
sys.exit(f"Invalid character in project name: {project!r}")
if not package:
package = project # default
# Try to find which packages are provided by the project
# Use default if that fails or if several packages are found
#
# The importlib.metadata module is used for projects whose name is different
# from the runtime Python package name (example: PyYAML/yaml)
if sys.version_info >= (3, 8):
dist = distribution(project).read_text("top_level.txt")
if dist is not None:
packages = [name for name in dist.split() if not name.startswith("_")]
if len(packages) == 1:
package = packages[0]
print(f'Using detected package "{package}" for project "{project}"', file=sys.stderr)
print("Suggestion: Try again with --package argument if that's not what you wanted", file=sys.stderr)
if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
sys.exit("Error: Current working directory must be the root of typeshed repository")
# Get normalized project name and version of installed package.
info = get_installed_package_info(project)
if info is None:
print(f'Error: "{project}" is not installed', file=sys.stderr)
print("", file=sys.stderr)
print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
sys.exit(1)
project, version = info
stub_dir = os.path.join("stubs", project).replace("\\", "/")
if os.path.exists(stub_dir):
sys.exit(f"Error: {stub_dir} already exists (delete it first)")
run_stubgen(package)
# Stubs were generated under out/. Copy them to stubs/.
copy_stubs("out", package, stub_dir)
run_isort(stub_dir)
run_black(stub_dir)
create_metadata(stub_dir, version)
# Since the generated stubs won't have many type annotations, we
# have to exclude them from strict pyright checks.
add_pyright_exclusion(stub_dir)
print("\nDone!\n\nSuggested next steps:")
print(f" 1. Manually review the generated stubs in {stub_dir}")
print(f' 2. Run "MYPYPATH={stub_dir} python3 -m mypy.stubtest {package}" to check the stubs against runtime')
print(f' 3. Run "mypy {stub_dir}" to check for errors')
print(f' 4. Run "black {stub_dir}" and "isort {stub_dir}" (if you\'ve made code changes)')
print(f' 5. Run "flake8 {stub_dir}" to check for e.g. unused imports')
print(" 6. Commit the changes on a new branch and create a typeshed PR")
| def main() -> None:
parser = argparse.ArgumentParser(
description="""Generate baseline stubs automatically for an installed pip package
using stubgen. Also run black and isort. If the name of
the project is different from the runtime Python package name, you may
need to use --package (example: --package yaml PyYAML)."""
)
parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
parser.add_argument("--package", help="generate stubs for this Python package (default is autodetected)")
args = parser.parse_args()
project = args.project
package = args.package
if not re.match(r"[a-zA-Z0-9-_.]+$", project):
sys.exit(f"Invalid character in project name: {project!r}")
if not package:
package = project # default
# Try to find which packages are provided by the project
# Use default if that fails or if several packages are found
#
# The importlib.metadata module is used for projects whose name is different
# from the runtime Python package name (example: PyYAML/yaml)
if sys.version_info >= (3, 8):
dist = distribution(project).read_text("top_level.txt")
if dist is not None:
packages = [name for name in dist.split() if not name.startswith("_")]
if len(packages) == 1:
package = packages[0]
print(f'Using detected package "{package}" for project "{project}"', file=sys.stderr)
print("Suggestion: Try again with --package argument if that's not what you wanted", file=sys.stderr)
if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
sys.exit("Error: Current working directory must be the root of typeshed repository")
# Get normalized project name and version of installed package.
info = get_installed_package_info(project)
if info is None:
print(f'Error: "{project}" is not installed', file=sys.stderr)
print("", file=sys.stderr)
print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
sys.exit(1)
project, version = info
stub_dir = f"stubs/{project}"
if os.path.exists(stub_dir):
sys.exit(f"Error: {stub_dir} already exists (delete it first)")
run_stubgen(package)
# Stubs were generated under out/. Copy them to stubs/.
copy_stubs("out", package, stub_dir)
run_isort(stub_dir)
run_black(stub_dir)
create_metadata(stub_dir, version)
# Since the generated stubs won't have many type annotations, we
# have to exclude them from strict pyright checks.
add_pyright_exclusion(stub_dir)
print("\nDone!\n\nSuggested next steps:")
print(f" 1. Manually review the generated stubs in {stub_dir}")
print(f' 2. Run "MYPYPATH={stub_dir} python3 -m mypy.stubtest {package}" to check the stubs against runtime')
print(f' 3. Run "mypy {stub_dir}" to check for errors')
print(f' 4. Run "black {stub_dir}" and "isort {stub_dir}" (if you\'ve made code changes)')
print(f' 5. Run "flake8 {stub_dir}" to check for e.g. unused imports')
print(" 6. Commit the changes on a new branch and create a typeshed PR")
|
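The package-name detection above reads the distribution's top_level.txt metadata through importlib.metadata; the same lookup in isolation looks like the sketch below (the printed value assumes PyYAML is installed, so treat the output as an example only):

from importlib.metadata import distribution  # Python 3.8+

top_level = distribution("PyYAML").read_text("top_level.txt")  # may be None
if top_level is not None:
    packages = [name for name in top_level.split() if not name.startswith("_")]
    print(packages)  # ['yaml'] -- the runtime package differs from the PyPI project name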
31,664 | def circleci_workflow_last_runs_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Retrieve last runs of a given CircleCI workflow.
Args:
client (Client): Client to perform the API calls
args (Dict[str, Any]): XSOAR arguments.
- 'workflow_name' (str): Name of workflow to retrieve its last runs details.
- 'vc_type' (str): VC type. One of 'github', 'bitbucket'.
- 'organization' (str): Organization to retrieve workflow last runs from.
Defaults to the artifacts parameter if none is given.
- 'project' (str): Project to retrieve workflow last runs from. Defaults to the project parameter if none is given.
- 'limit' (int): Maximum number of results to return.
Returns:
(CommandResults).
"""
vc_type, organization, project, limit = get_common_arguments(client, args)
workflow_name: str = args.get('workflow_name', 'nightly')
response = get_response_with_pagination(client.get_last_workflow_runs,
[vc_type, organization, project, workflow_name], limit)
return CommandResults(
outputs_prefix='CircleCI.WorkflowRun',
outputs_key_field='id',
readable_output=tableToMarkdown(f'CircleCI Workflow {workflow_name} Last Runs', response, removeNull=True,
headerTransform=camelize_string),
outputs=response
)
| def circleci_workflow_last_runs_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Retrieve last runs of a given CircleCI workflow.
Args:
client (Client): Client to perform the API calls
args (Dict[str, Any]): XSOAR arguments.
- 'workflow_name' (str): Name of workflow to retrieve its last runs details.
- 'vc_type' (str): VC type. One of 'github', 'bitbucket'.
- 'organization' (str): Organization to retrieve workflow last runs from.
Defaults to the artifacts parameter if none is given.
- 'project' (str): Project to retrieve workflow last runs from. Defaults to the project parameter if none is given.
- 'limit' (int): Maximum number of results to return.
Returns:
(CommandResults).
"""
vc_type, organization, project, limit = get_common_arguments(client, args)
workflow_name: str = args.get('workflow_name')
response = get_response_with_pagination(client.get_last_workflow_runs,
[vc_type, organization, project, workflow_name], limit)
return CommandResults(
outputs_prefix='CircleCI.WorkflowRun',
outputs_key_field='id',
readable_output=tableToMarkdown(f'CircleCI Workflow {workflow_name} Last Runs', response, removeNull=True,
headerTransform=camelize_string),
outputs=response
)
|
4,871 | def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
"""
Return a new transform with an added offset.
Parameters
----------
trans : `Transform` subclass
Any transform, to which offset will be applied.
fig : `.Figure`, default: None
Current figure. It can be None if *units* are 'dots'.
x, y : float, default: 0.0
The offset to apply.
units : {'inches', 'points', 'dots'}, default: 'inches'
Units of the offset.
Returns
-------
`Transform` subclass
Transform with applied offset.
"""
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif units == 'inches':
pass
else:
cbook._check_in_list(['dots', 'points', 'inches'], units=units)
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
| def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
"""
Return a new transform with an added offset.
Parameters
----------
trans : `Transform` subclass
Any transform, to which offset will be applied.
fig : `~matplotlib.figure.Figure`, default: None
Current figure. It can be None if *units* are 'dots'.
x, y : float, default: 0.0
The offset to apply.
units : {'inches', 'points', 'dots'}, default: 'inches'
Units of the offset.
Returns
-------
`Transform` subclass
Transform with applied offset.
"""
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif units == 'inches':
pass
else:
cbook._check_in_list(['dots', 'points', 'inches'], units=units)
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
|
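A common use of offset_copy is labelling data points at a fixed on-screen offset that survives zooming; a small sketch in the usual matplotlib style:

import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy

fig, ax = plt.subplots()
xs, ys = [1, 2, 3], [1, 4, 9]
ax.plot(xs, ys, 'o')

# Shift each label 5 points right and 5 points up from its data point.
trans = offset_copy(ax.transData, fig=fig, x=5, y=5, units='points')
for x, y in zip(xs, ys):
    ax.text(x, y, f'({x}, {y})', transform=trans)
plt.close(fig)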
606 | def find_ucr_tables_for_deleted_domains():
deleted_domain_names = Domain.get_deleted_domain_names()
connection_name = ConnectionManager().get_django_db_alias(UCR_ENGINE_ID)
table_names = connections[connection_name].introspection.table_names()
ucr_table_names = [name for name in table_names if
name.startswith(UCR_TABLE_PREFIX) or name.startswith(LEGACY_UCR_TABLE_PREFIX)]
deleted_domains_to_tables = defaultdict(list)
for ucr_table_name in ucr_table_names:
table_domain = _get_domain_for_table_name(ucr_table_name)
if table_domain in deleted_domain_names:
deleted_domains_to_tables[table_domain].append(ucr_table_name)
return deleted_domains_to_tables
| def find_ucr_tables_for_deleted_domains():
deleted_domain_names = set(Domain.get_deleted_domain_names())
connection_name = ConnectionManager().get_django_db_alias(UCR_ENGINE_ID)
table_names = connections[connection_name].introspection.table_names()
ucr_table_names = [name for name in table_names if
name.startswith(UCR_TABLE_PREFIX) or name.startswith(LEGACY_UCR_TABLE_PREFIX)]
deleted_domains_to_tables = defaultdict(list)
for ucr_table_name in ucr_table_names:
table_domain = _get_domain_for_table_name(ucr_table_name)
if table_domain in deleted_domain_names:
deleted_domains_to_tables[table_domain].append(ucr_table_name)
return deleted_domains_to_tables
|
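The only change in this pair is wrapping the deleted domain names in a set, which turns the membership test inside the loop from a linear scan of a list into a constant-time hash lookup while keeping the result identical. A tiny illustration (the table-name split below is a stand-in for _get_domain_for_table_name):

deleted_names = ["domain-a", "domain-b", "domain-c"]
tables = ["ucr_domain-b_cases_abc", "ucr_domain-x_forms_def"]

deleted_set = set(deleted_names)          # built once; O(1) average lookups afterwards
for table in tables:
    table_domain = table.split("_")[1]    # hypothetical domain extraction
    print(table, table_domain in deleted_set)
# ucr_domain-b_cases_abc True
# ucr_domain-x_forms_def False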
42,116 | def save_static_image(figure: Union[go.Figure, Axes, np.ndarray]) -> None:
if isinstance(figure, go.Figure):
with tempfile.TemporaryDirectory() as td:
figure.write_image(td + "tmp.png")
else:
plt.savefig(BytesIO())
| def save_static_image(figure: Union[go.Figure, Axes, np.ndarray]) -> None:
if isinstance(figure, go.Figure):
figure.write_image(BytesIO())
else:
plt.savefig(BytesIO())
|
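The rewritten save_static_image renders into an in-memory BytesIO buffer instead of a temporary directory, so nothing touches the filesystem. The same sink pattern with matplotlib alone, as a small self-contained check:

from io import BytesIO

import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs without a display
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

buf = BytesIO()
fig.savefig(buf, format="png")        # render fully, but only into memory
print(buf.getbuffer().nbytes > 0)     # True: PNG bytes exist without any temp file
plt.close(fig)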
50,365 | def safe_next_down(value, width, allow_subnormal):
smallest_normal = width_smallest_normals[width]
if allow_subnormal or value <= -smallest_normal or value > smallest_normal:
return next_down(value, width)
else:
if 0.0 < value <= smallest_normal:
return 0.0
else:
return -smallest_normal
| def next_down_normal(value, width, allow_subnormal):
value = next_down(value, width)
if (not allow_subnormal) and 0 < abs(value) < width_smallest_normals[width]:
return 0.0 if value > 0 else -width_smallest_normals[width]
return value
|
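The renamed helper flushes results that would land in the subnormal range back to zero (or to the negative smallest normal) when subnormals are disallowed. A float64-only sketch of the same idea using only the standard library, where sys.float_info.min stands in for width_smallest_normals[64] and math.nextafter (Python 3.9+) stands in for next_down:

import math
import sys

SMALLEST_NORMAL = sys.float_info.min  # smallest positive *normal* float64

def next_down_normal_f64(value, allow_subnormal):
    value = math.nextafter(value, -math.inf)  # next representable float towards -inf
    if not allow_subnormal and 0 < abs(value) < SMALLEST_NORMAL:
        return 0.0 if value > 0 else -SMALLEST_NORMAL
    return value

# Stepping down from the smallest normal lands in the subnormal range...
print(0 < next_down_normal_f64(SMALLEST_NORMAL, True) < SMALLEST_NORMAL)   # True
# ...unless subnormals are disallowed, in which case the value is flushed to zero.
print(next_down_normal_f64(SMALLEST_NORMAL, False))                        # 0.0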
7,173 | def test_cell():
""" Test that "page" image can be loaded. """
data.page()
| def test_cell():
""" Test that "page" image can be loaded. """
data.cell()
|
47,886 | def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
mask_rcnn_model_xml = args.mask_rcnn_model
mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin'
text_enc_model_xml = args.text_enc_model
text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin'
text_dec_model_xml = args.text_dec_model
text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin'
# Plugin initialization for specified device and load extensions library if specified.
log.info('Creating Inference Engine...')
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
# Read IR
log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin))
mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin))
text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin))
text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin)
if 'CPU' in args.device:
supported_layers = ie.query_network(mask_rcnn_net, 'CPU')
not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error('Following layers are not supported by the plugin for specified device {}:\n {}'.
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
required_input_keys = {'im_data', 'im_info'}
assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \
'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys))
required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'}
assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \
'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys))
n, c, h, w = mask_rcnn_net.inputs['im_data'].shape
assert n == 1, 'Only batch 1 is supported by the demo application'
log.info('Loading IR to the plugin...')
mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2)
text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device)
text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device)
for name, input in text_dec_exec_net.inputs.items():
if len(input.shape) == 3:
if input.shape[1] == 1:
trd_input_prev_hidden = name
else:
trd_input_encoder_outputs = name
elif len(input.shape) == 1:
trd_input_prev_symbol = name
for name, output in text_dec_exec_net.outputs.items():
if len(output.shape) == 3:
trd_output_cur_hidden = name
elif len(output.shape) == 2:
trd_output_symbols_distr = name
hidden_shape = text_dec_net.inputs[trd_input_prev_hidden].shape
del mask_rcnn_net
del text_enc_net
del text_dec_net
try:
input_source = int(args.input_source)
except ValueError:
input_source = args.input_source
if os.path.isdir(input_source):
cap = FolderCapture(input_source)
else:
cap = cv2.VideoCapture(input_source)
if not cap.isOpened():
log.error('Failed to open "{}"'.format(args.input_source))
if isinstance(cap, cv2.VideoCapture):
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if args.no_track:
tracker = None
else:
tracker = StaticIOUTracker()
visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores)
render_time = 0
log.info('Starting inference...')
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if not args.keep_aspect_ratio:
# Resize the image to a target size.
scale_x = w / frame.shape[1]
scale_y = h / frame.shape[0]
input_image = cv2.resize(frame, (w, h))
else:
# Resize the image to keep the same aspect ratio and to fit it to a window of a target size.
scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])
input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)
input_image_size = input_image.shape[:2]
input_image = np.pad(input_image, ((0, h - input_image_size[0]),
(0, w - input_image_size[1]),
(0, 0)),
mode='constant', constant_values=0)
# Change data layout from HWC to CHW.
input_image = input_image.transpose((2, 0, 1))
input_image = input_image.reshape((n, c, h, w)).astype(np.float32)
input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
# Run the net.
inf_start = time.time()
outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info})
# Parse detection results of the current request
boxes = outputs['boxes']
scores = outputs['scores']
classes = outputs['classes'].astype(np.uint32)
raw_masks = outputs['raw_masks']
text_features = outputs['text_features']
# Filter out detections with low confidence.
detections_filter = scores > args.prob_threshold
scores = scores[detections_filter]
classes = classes[detections_filter]
boxes = boxes[detections_filter]
raw_masks = raw_masks[detections_filter]
text_features = text_features[detections_filter]
boxes[:, 0::2] /= scale_x
boxes[:, 1::2] /= scale_y
masks = []
for box, cls, raw_mask in zip(boxes, classes, raw_masks):
raw_cls_mask = raw_mask[cls, ...]
mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1])
masks.append(mask)
texts = []
for feature in text_features:
feature = text_enc_exec_net.infer({'input': feature})
feature = list(feature.values())[0]
feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
text = ''
for i in range(MAX_SEQ_LEN):
decoder_output = text_dec_exec_net.infer({
trd_input_prev_symbol: prev_symbol_index,
trd_input_prev_hidden: hidden,
trd_input_encoder_outputs: feature})
symbols_distr = decoder_output[trd_output_symbols_distr]
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += args.alphabet[prev_symbol_index]
hidden = decoder_output[trd_output_cur_hidden]
texts.append(text)
inf_end = time.time()
inf_time = inf_end - inf_start
render_start = time.time()
if len(boxes) and args.raw_output_message:
log.info('Detected boxes:')
log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ')
for box, cls, score, mask in zip(boxes, classes, scores, masks):
log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box))
# Get instance track IDs.
masks_tracks_ids = None
if tracker is not None:
masks_tracks_ids = tracker(masks, classes)
# Visualize masks.
frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids)
# Draw performance stats.
inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000)
render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000)
cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
# Print performance counters.
if args.perf_counts:
perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts()
log.info('Performance counters:')
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status',
'real_time, us'))
for layer, stats in perf_counts.items():
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
if not args.no_show:
# Show resulting image.
cv2.imshow('Results', frame)
render_end = time.time()
render_time = render_end - render_start
if not args.no_show:
key = cv2.waitKey(args.delay)
esc_code = 27
if key == esc_code:
break
cv2.destroyAllWindows()
cap.release()
| def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
mask_rcnn_model_xml = args.mask_rcnn_model
mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin'
text_enc_model_xml = args.text_enc_model
text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin'
text_dec_model_xml = args.text_dec_model
text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin'
# Plugin initialization for specified device and load extensions library if specified.
log.info('Creating Inference Engine...')
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
# Read IR
log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin))
mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin))
text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin))
text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin)
if 'CPU' in args.device:
supported_layers = ie.query_network(mask_rcnn_net, 'CPU')
not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error('Following layers are not supported by the plugin for specified device {}:\n {}'.
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
required_input_keys = {'im_data', 'im_info'}
assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \
'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys))
required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'}
assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \
'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys))
n, c, h, w = mask_rcnn_net.inputs['im_data'].shape
assert n == 1, 'Only batch 1 is supported by the demo application'
log.info('Loading IR to the plugin...')
mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2)
text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device)
text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device)
for name, input in text_dec_exec_net.inputs.items():
if len(input.shape) == 3:
if input.shape[1] == 1:
trd_input_prev_hidden = name
else:
trd_input_encoder_outputs = name
elif len(input.shape) == 1:
trd_input_prev_symbol = name
for name, output in text_dec_exec_net.outputs.items():
if len(output.shape) == 3 and output.shape[1] == 1:
trd_output_cur_hidden = name
elif len(output.shape) == 2:
trd_output_symbols_distr = name
hidden_shape = text_dec_net.inputs[trd_input_prev_hidden].shape
del mask_rcnn_net
del text_enc_net
del text_dec_net
try:
input_source = int(args.input_source)
except ValueError:
input_source = args.input_source
if os.path.isdir(input_source):
cap = FolderCapture(input_source)
else:
cap = cv2.VideoCapture(input_source)
if not cap.isOpened():
log.error('Failed to open "{}"'.format(args.input_source))
if isinstance(cap, cv2.VideoCapture):
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if args.no_track:
tracker = None
else:
tracker = StaticIOUTracker()
visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores)
render_time = 0
log.info('Starting inference...')
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if not args.keep_aspect_ratio:
# Resize the image to a target size.
scale_x = w / frame.shape[1]
scale_y = h / frame.shape[0]
input_image = cv2.resize(frame, (w, h))
else:
# Resize the image to keep the same aspect ratio and to fit it to a window of a target size.
scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])
input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)
input_image_size = input_image.shape[:2]
input_image = np.pad(input_image, ((0, h - input_image_size[0]),
(0, w - input_image_size[1]),
(0, 0)),
mode='constant', constant_values=0)
# Change data layout from HWC to CHW.
input_image = input_image.transpose((2, 0, 1))
input_image = input_image.reshape((n, c, h, w)).astype(np.float32)
input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
# Run the net.
inf_start = time.time()
outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info})
# Parse detection results of the current request
boxes = outputs['boxes']
scores = outputs['scores']
classes = outputs['classes'].astype(np.uint32)
raw_masks = outputs['raw_masks']
text_features = outputs['text_features']
# Filter out detections with low confidence.
detections_filter = scores > args.prob_threshold
scores = scores[detections_filter]
classes = classes[detections_filter]
boxes = boxes[detections_filter]
raw_masks = raw_masks[detections_filter]
text_features = text_features[detections_filter]
boxes[:, 0::2] /= scale_x
boxes[:, 1::2] /= scale_y
masks = []
for box, cls, raw_mask in zip(boxes, classes, raw_masks):
raw_cls_mask = raw_mask[cls, ...]
mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1])
masks.append(mask)
texts = []
for feature in text_features:
feature = text_enc_exec_net.infer({'input': feature})
feature = list(feature.values())[0]
feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
text = ''
for i in range(MAX_SEQ_LEN):
decoder_output = text_dec_exec_net.infer({
trd_input_prev_symbol: prev_symbol_index,
trd_input_prev_hidden: hidden,
trd_input_encoder_outputs: feature})
symbols_distr = decoder_output[trd_output_symbols_distr]
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += args.alphabet[prev_symbol_index]
hidden = decoder_output[trd_output_cur_hidden]
texts.append(text)
inf_end = time.time()
inf_time = inf_end - inf_start
render_start = time.time()
if len(boxes) and args.raw_output_message:
log.info('Detected boxes:')
log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ')
for box, cls, score, mask in zip(boxes, classes, scores, masks):
log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box))
# Get instance track IDs.
masks_tracks_ids = None
if tracker is not None:
masks_tracks_ids = tracker(masks, classes)
# Visualize masks.
frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids)
# Draw performance stats.
inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000)
render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000)
cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
# Print performance counters.
if args.perf_counts:
perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts()
log.info('Performance counters:')
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status',
'real_time, us'))
for layer, stats in perf_counts.items():
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
if not args.no_show:
# Show resulting image.
cv2.imshow('Results', frame)
render_end = time.time()
render_time = render_end - render_start
if not args.no_show:
key = cv2.waitKey(args.delay)
esc_code = 27
if key == esc_code:
break
cv2.destroyAllWindows()
cap.release()
|
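A short aside on the preprocessing step in the demo pair above: before inference the frame is padded to the fixed network input size and converted from HWC to CHW layout with a batch dimension. A minimal NumPy-only sketch of that layout step, using made-up frame and input sizes (not taken from the row above):

```python
import numpy as np

# Hypothetical 3-channel frame and target network input size (h, w), for illustration only.
frame = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)  # HWC
h, w = 480, 640

# Pad on the bottom/right so the frame fits the fixed network input.
padded = np.pad(frame,
                ((0, h - frame.shape[0]), (0, w - frame.shape[1]), (0, 0)),
                mode='constant', constant_values=0)

# Change data layout from HWC to CHW and add the batch dimension the model expects.
chw = padded.transpose((2, 0, 1))
batch = chw.reshape((1, 3, h, w)).astype(np.float32)
print(batch.shape)  # (1, 3, 480, 640)
```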
27,740 | def determine_setup(
inifile: Optional[str],
args: Sequence[str],
rootdir_cmd_arg: Optional[str] = None,
config: Optional["Config"] = None,
) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
rootdir = None
dirs = get_dirs_from_args(args)
if inifile:
inipath_ = Path(inifile).resolve()
inipath = inipath_ # type: Optional[Path]
inicfg = load_config_dict_from_file(inipath_) or {}
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inipath, inicfg = locate_config([ancestor])
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in itertools.chain(
(ancestor,), reversed(ancestor.parents)
):
if (possible_rootdir / "setup.py").exists():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inipath, inicfg = locate_config(dirs)
if rootdir is None:
if config is not None:
cwd = config.invocation_params.dir
else:
cwd = Path.cwd()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = Path(os.path.expandvars(rootdir_cmd_arg)).resolve()
if not rootdir.is_dir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
assert rootdir is not None
return rootdir, inipath, inicfg or {}
| def determine_setup(
inifile: Optional[str],
args: Sequence[str],
rootdir_cmd_arg: Optional[str] = None,
config: Optional["Config"] = None,
) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
rootdir = None
dirs = get_dirs_from_args(args)
if inifile:
inipath_ = Path(inifile).resolve()
inipath = inipath_ # type: Optional[Path]
inicfg = load_config_dict_from_file(inipath_) or {}
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inipath, inicfg = locate_config([ancestor])
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in itertools.chain(
(ancestor,), reversed(ancestor.parents)
):
if (possible_rootdir / "setup.py").exists():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inipath, inicfg = locate_config(dirs)
if rootdir is None:
if config is not None:
cwd = config.invocation_params.dir
else:
cwd = Path.cwd()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = Path(os.path.expandvars(rootdir_cmd_arg)).absolute()
if not rootdir.is_dir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
assert rootdir is not None
return rootdir, inipath, inicfg or {}
|
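The only change in the pair above is `Path(...).resolve()` becoming `Path(...).absolute()` for the `--rootdir` argument. As a quick illustration of the difference, a minimal standard-library sketch (paths are examples; `absolute()` only anchors the path to the current directory, while `resolve()` also normalizes `..` and follows symlinks):

```python
from pathlib import Path

p = Path("subdir/../rootdir")
print(p.absolute())  # e.g. /cwd/subdir/../rootdir  -- ".." kept, symlinks untouched
print(p.resolve())   # e.g. /cwd/rootdir            -- "..", and any symlinks, resolved
```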
30,527 | def stix_ip_observable(namespace: str, indicator: dict) -> List[Observable]:
"""
Create STIX IP observable.
Args:
namespace: The XML namespace .
indicator: The Demisto IP indicator.
Returns:
STIX IP observable.
"""
category = cybox.objects.address_object.Address.CAT_IPV4
if indicator['indicator_type'] in [FeedIndicatorType.IPv6, FeedIndicatorType.IPv6CIDR]:
category = cybox.objects.address_object.Address.CAT_IPV6
value = indicator['value']
indicator_values = [value]
if '-' in value:
# looks like an IP Range, let's try to make it a CIDR
a1, a2 = value.split('-', 1)
if a1 == a2:
# same IP
indicator_values = [a1]
else:
# use netaddr builtin algo to summarize range into CIDR
iprange = netaddr.IPRange(a1, a2)
cidrs = iprange.cidrs()
indicator_values = list(map(str, cidrs))
observables = []
for indicator_value in indicator_values:
id_ = '{}:observable-{}'.format(
namespace,
uuid.uuid4()
)
address_object = cybox.objects.address_object.Address(
address_value=indicator_value,
category=category
)
observable = Observable(
title='{}: {}'.format(indicator['indicator_type'], indicator_value),
id_=id_,
item=address_object
)
observables.append(observable)
return observables
| def create_stix_ip_observable(namespace: str, indicator: dict) -> List[Observable]:
"""
Create STIX IP observable.
Args:
namespace: The XML namespace .
indicator: The Demisto IP indicator.
Returns:
STIX IP observable.
"""
category = cybox.objects.address_object.Address.CAT_IPV4
if indicator['indicator_type'] in [FeedIndicatorType.IPv6, FeedIndicatorType.IPv6CIDR]:
category = cybox.objects.address_object.Address.CAT_IPV6
value = indicator['value']
indicator_values = [value]
if '-' in value:
# looks like an IP Range, let's try to make it a CIDR
a1, a2 = value.split('-', 1)
if a1 == a2:
# same IP
indicator_values = [a1]
else:
# use netaddr builtin algo to summarize range into CIDR
iprange = netaddr.IPRange(a1, a2)
cidrs = iprange.cidrs()
indicator_values = list(map(str, cidrs))
observables = []
for indicator_value in indicator_values:
id_ = '{}:observable-{}'.format(
namespace,
uuid.uuid4()
)
address_object = cybox.objects.address_object.Address(
address_value=indicator_value,
category=category
)
observable = Observable(
title='{}: {}'.format(indicator['indicator_type'], indicator_value),
id_=id_,
item=address_object
)
observables.append(observable)
return observables
|
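The range handling in the pair above leans on `netaddr.IPRange(...).cidrs()` to summarize an address range into CIDR blocks. A small sketch of just that call in isolation, with example addresses:

```python
import netaddr

# Summarize an inclusive IP range into the minimal list of CIDR networks.
iprange = netaddr.IPRange('192.0.2.0', '192.0.2.130')
print(iprange.cidrs())
# [IPNetwork('192.0.2.0/25'), IPNetwork('192.0.2.128/31'), IPNetwork('192.0.2.130/32')]
```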
35,184 | def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval,
order, name='', integer_output=False):
"""
Args:
coord_func (function): generates code to do the coordinate
transformation. See for example, `_get_coord_shift`.
ndim (int): The number of dimensions.
large_int (bool): If true use Py_ssize_t instead of int for indexing.
yshape (tuple): Shape of the output array.
mode (str): Signal extension mode to use at the array boundaries
cval (float): constant value used when `mode == 'constant'`.
name (str): base name for the interpolation kernel
integer_output (bool): boolean indicating whether the output has an
integer type.
Returns:
operation (str): code body for the ElementwiseKernel
name (str): name for the ElementwiseKernel
"""
ops = []
ops.append('double out = 0.0;')
if large_int:
uint_t = 'size_t'
int_t = 'ptrdiff_t'
else:
uint_t = 'unsigned int'
int_t = 'int'
# determine strides for x along each axis
for j in range(ndim):
ops.append(
'const {int_t} xsize_{j} = x.shape()[{j}];'.format(
int_t=int_t, j=j)
)
ops.append('const {uint_t} sx_{j} = 1;'.format(uint_t=uint_t, j=ndim - 1))
for j in range(ndim - 1, 0, -1):
ops.append(
'const {uint_t} sx_{jm} = sx_{j} * xsize_{j};'.format(
uint_t=uint_t, jm=j - 1, j=j,
)
)
# create in_coords array to store the unraveled indices
ops.append(_unravel_loop_index(yshape, uint_t))
# compute the transformed (target) coordinates, c_j
ops = ops + coord_func(ndim)
if cval is numpy.nan:
cval = 'CUDART_NAN'
elif cval == numpy.inf:
cval = 'CUDART_INF'
elif cval == -numpy.inf:
cval = '-CUDART_INF'
else:
cval = '(double){cval}'.format(cval=cval)
if mode == 'constant':
# use cval if coordinate is outside the bounds of x
_cond = ' || '.join(
['(c_{j} < 0) || (c_{j} > xsize_{j} - 1)'.format(j=j)
for j in range(ndim)])
ops.append("""
if ({cond})
{{
out = {cval};
}}
else
{{""".format(cond=_cond, cval=cval))
if order == 0:
for j in range(ndim):
# determine nearest neighbor
ops.append("""
{int_t} cf_{j} = ({int_t})floor((double)c_{j} + 0.5);''')
""".format(int_t=int_t, j=j))
# handle boundary
if mode != 'constant':
ixvar = 'cf_{j}'.format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, 'xsize_{}'.format(j)))
# sum over ic_j will give the raveled coordinate in the input
ops.append("""
{int_t} ic_{j} = cf_{j} * sx_{j};
""".format(int_t=int_t, j=j))
_coord_idx = ' + '.join(['ic_{}'.format(j) for j in range(ndim)])
ops.append("""
out = x[{coord_idx}];""".format(coord_idx=_coord_idx))
elif order == 1:
for j in range(ndim):
# get coordinates for linear interpolation along axis j
ops.append("""
{int_t} cf_{j} = ({int_t})floor((double)c_{j});
{int_t} cc_{j} = cf_{j} + 1;
{int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed
""".format(int_t=int_t, j=j))
# handle boundaries for extension modes.
ops.append("""
{int_t} cf_bounded_{j} = cf_{j};
{int_t} cc_bounded_{j} = cc_{j};
""".format(int_t=int_t, j=j))
if mode != 'constant':
ixvar = 'cf_bounded_{j}'.format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, 'xsize_{}'.format(j)))
ixvar = 'cc_bounded_{j}'.format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, 'xsize_{}'.format(j)))
ops.append("""
for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++)
{{
W w_{j};
{int_t} ic_{j};
if (s_{j} == 0)
{{
w_{j} = (W)cc_{j} - c_{j};
ic_{j} = cf_bounded_{j} * sx_{j};
}} else
{{
w_{j} = c_{j} - (W)cf_{j};
ic_{j} = cc_bounded_{j} * sx_{j};
}}""".format(int_t=int_t, j=j))
_weight = ' * '.join(['w_{j}'.format(j=j) for j in range(ndim)])
_coord_idx = ' + '.join(['ic_{j}'.format(j=j) for j in range(ndim)])
ops.append("""
X val = x[{coord_idx}];
out += val * ({weight});""".format(
coord_idx=_coord_idx, weight=_weight))
ops.append('}' * ndim)
if mode == 'constant':
ops.append('}')
if integer_output:
ops.append('y = (Y)rint((double)out);')
else:
ops.append('y = (Y)out;')
operation = '\n'.join(ops)
name = 'interpolate_{}_order{}_{}_{}d_y{}'.format(
name, order, mode, ndim, "_".join(["{}".format(j) for j in yshape]),
)
if uint_t == 'size_t':
name += '_i64'
return operation, name
| def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval,
order, name='', integer_output=False):
"""
Args:
coord_func (function): generates code to do the coordinate
transformation. See for example, `_get_coord_shift`.
ndim (int): The number of dimensions.
large_int (bool): If true use Py_ssize_t instead of int for indexing.
yshape (tuple): Shape of the output array.
mode (str): Signal extension mode to use at the array boundaries
cval (float): constant value used when `mode == 'constant'`.
name (str): base name for the interpolation kernel
integer_output (bool): boolean indicating whether the output has an
integer type.
Returns:
operation (str): code body for the ElementwiseKernel
name (str): name for the ElementwiseKernel
"""
ops = []
ops.append('double out = 0.0;')
if large_int:
uint_t = 'size_t'
int_t = 'ptrdiff_t'
else:
uint_t = 'unsigned int'
int_t = 'int'
# determine strides for x along each axis
for j in range(ndim):
ops.append(
'const {int_t} xsize_{j} = x.shape()[{j}];'.format(
int_t=int_t, j=j)
)
ops.append('const {uint_t} sx_{j} = 1;'.format(uint_t=uint_t, j=ndim - 1))
for j in range(ndim - 1, 0, -1):
ops.append(
'const {uint_t} sx_{jm} = sx_{j} * xsize_{j};'.format(
uint_t=uint_t, jm=j - 1, j=j,
)
)
# create in_coords array to store the unraveled indices
ops.append(_unravel_loop_index(yshape, uint_t))
# compute the transformed (target) coordinates, c_j
ops = ops + coord_func(ndim)
if cval is numpy.nan:
cval = 'CUDART_NAN'
elif cval == numpy.inf:
cval = 'CUDART_INF'
elif cval == -numpy.inf:
cval = '-CUDART_INF'
else:
cval = '(double){cval}'.format(cval=cval)
if mode == 'constant':
# use cval if coordinate is outside the bounds of x
_cond = ' || '.join(
['(c_{j} < 0) || (c_{j} > xsize_{j} - 1)'.format(j=j)
for j in range(ndim)])
ops.append("""
if ({cond})
{{
out = {cval};
}}
else
{{""".format(cond=_cond, cval=cval))
if order == 0:
for j in range(ndim):
# determine nearest neighbor
ops.append("""
{int_t} cf_{j} = ({int_t})floor((double)c_{j} + 0.5);
""".format(int_t=int_t, j=j))
# handle boundary
if mode != 'constant':
ixvar = 'cf_{j}'.format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, 'xsize_{}'.format(j)))
# sum over ic_j will give the raveled coordinate in the input
ops.append("""
{int_t} ic_{j} = cf_{j} * sx_{j};
""".format(int_t=int_t, j=j))
_coord_idx = ' + '.join(['ic_{}'.format(j) for j in range(ndim)])
ops.append("""
out = x[{coord_idx}];""".format(coord_idx=_coord_idx))
elif order == 1:
for j in range(ndim):
# get coordinates for linear interpolation along axis j
ops.append("""
{int_t} cf_{j} = ({int_t})floor((double)c_{j});
{int_t} cc_{j} = cf_{j} + 1;
{int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed
""".format(int_t=int_t, j=j))
# handle boundaries for extension modes.
ops.append("""
{int_t} cf_bounded_{j} = cf_{j};
{int_t} cc_bounded_{j} = cc_{j};
""".format(int_t=int_t, j=j))
if mode != 'constant':
ixvar = 'cf_bounded_{j}'.format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, 'xsize_{}'.format(j)))
ixvar = 'cc_bounded_{j}'.format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, 'xsize_{}'.format(j)))
ops.append("""
for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++)
{{
W w_{j};
{int_t} ic_{j};
if (s_{j} == 0)
{{
w_{j} = (W)cc_{j} - c_{j};
ic_{j} = cf_bounded_{j} * sx_{j};
}} else
{{
w_{j} = c_{j} - (W)cf_{j};
ic_{j} = cc_bounded_{j} * sx_{j};
}}""".format(int_t=int_t, j=j))
_weight = ' * '.join(['w_{j}'.format(j=j) for j in range(ndim)])
_coord_idx = ' + '.join(['ic_{j}'.format(j=j) for j in range(ndim)])
ops.append("""
X val = x[{coord_idx}];
out += val * ({weight});""".format(
coord_idx=_coord_idx, weight=_weight))
ops.append('}' * ndim)
if mode == 'constant':
ops.append('}')
if integer_output:
ops.append('y = (Y)rint((double)out);')
else:
ops.append('y = (Y)out;')
operation = '\n'.join(ops)
name = 'interpolate_{}_order{}_{}_{}d_y{}'.format(
name, order, mode, ndim, "_".join(["{}".format(j) for j in yshape]),
)
if uint_t == 'size_t':
name += '_i64'
return operation, name
|
38,932 | def deep_update(mapping: Dict[str, Any], updating_mapping: Dict[str, Any]) -> Dict[str, Any]:
for k, v in updating_mapping.items():
if (k in mapping) and isinstance(mapping[k], dict) and isinstance(v, dict):
deep_update(mapping[k], v)
else:
mapping[k] = v
return mapping
| def deep_update(mapping: Dict[KeyType, Any], updating_mapping: Dict[KeyType, Any]) -> Dict[KeyType, Any]:
for k, v in updating_mapping.items():
if (k in mapping) and isinstance(mapping[k], dict) and isinstance(v, dict):
deep_update(mapping[k], v)
else:
mapping[k] = v
return mapping
|
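The modified side of the pair above swaps the concrete `str` key type for a `KeyType` type variable (presumably declared elsewhere in that source as `TypeVar('KeyType')`), so the key type is preserved by the annotations. A self-contained sketch of the same recursive in-place merge with that assumed declaration:

```python
from typing import Any, Dict, TypeVar

KeyType = TypeVar('KeyType')  # assumed; the real pair defines/imports this elsewhere

def deep_update(mapping: Dict[KeyType, Any], updating_mapping: Dict[KeyType, Any]) -> Dict[KeyType, Any]:
    # Merge updating_mapping into mapping in place, recursing into nested dicts.
    for k, v in updating_mapping.items():
        if k in mapping and isinstance(mapping[k], dict) and isinstance(v, dict):
            deep_update(mapping[k], v)
        else:
            mapping[k] = v
    return mapping

print(deep_update({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'c': 4}))
# {'a': {'x': 1, 'y': 3}, 'b': 2, 'c': 4}
```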
31,014 | def install_nightly_packs(client, host, prints_manager, thread_index, packs_to_install, request_timeout=999999):
"""
Install content packs on nightly build.
Args:
client(demisto_client): The configured client to use.
host (str): The server URL.
prints_manager (ParallelPrintsManager): Print manager object.
thread_index (int): the thread index.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
Returns:
"""
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
message = 'Installing the following packs in server {}:\n{}'.format(host, packs_to_install_str)
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN, include_timestamp=True)
prints_manager.execute_thread_prints(thread_index)
# make the pack installation request
global PACK_INSTALL
PACK_INSTALL = False
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
while PACK_INSTALL is not True:
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
message = 'Packs were successfully installed!\n'
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN,
include_timestamp=True)
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
err_msg = f'Failed to install packs - with status code {status_code}\n{message}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
raise Exception(err_msg)
PACK_INSTALL = True
break
except Exception as e:
err_msg = f'The request to install packs has failed. Reason:\n{str(e)}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
PACK_INSTALL = False
pack_id = ''
message = str(e).split('\n')
# Get the pack ID of the failed pack.
for line in message:
if line.startswith('HTTP response body: '):
error_message = json.loads(line.split(': ', 1)[1])
error = error_message.get('error')
pack_id = error.split()[-2]
# Removed the bad pack from the list
packs = [pack for pack in packs_to_install if not (pack['id'] == pack_id)]
request_data = {
'packs': packs,
'ignoreWarnings': True
}
finally:
prints_manager.execute_thread_prints(thread_index)
| def install_nightly_packs(client, host, prints_manager, thread_index, packs_to_install, request_timeout=999999):
"""
Install content packs on nightly build.
Args:
client(demisto_client): The configured client to use.
host (str): The server URL.
prints_manager (ParallelPrintsManager): Print manager object.
thread_index (int): the thread index.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
Returns:
"""
packs_to_install_str = '\n '.join([pack['id'] for pack in packs_to_install])
message = 'Installing the following packs in server {}:\n{}'.format(host, packs_to_install_str)
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN, include_timestamp=True)
prints_manager.execute_thread_prints(thread_index)
# make the pack installation request
global PACK_INSTALL
PACK_INSTALL = False
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
while PACK_INSTALL is not True:
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
message = 'Packs were successfully installed!\n'
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN,
include_timestamp=True)
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
err_msg = f'Failed to install packs - with status code {status_code}\n{message}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
raise Exception(err_msg)
PACK_INSTALL = True
break
except Exception as e:
err_msg = f'The request to install packs has failed. Reason:\n{str(e)}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
PACK_INSTALL = False
pack_id = ''
message = str(e).split('\n')
# Get the pack ID of the failed pack.
for line in message:
if line.startswith('HTTP response body: '):
error_message = json.loads(line.split(': ', 1)[1])
error = error_message.get('error')
pack_id = error.split()[-2]
# Removed the bad pack from the list
packs = [pack for pack in packs_to_install if not (pack['id'] == pack_id)]
request_data = {
'packs': packs,
'ignoreWarnings': True
}
finally:
prints_manager.execute_thread_prints(thread_index)
|
13,437 | def _add(project, args):
if '--header-scope' not in args or args['--header-scope'] not in ['public', 'private', 'project']:
header_scope = 'project'
else:
header_scope = args['--header-scope']
parent_group=None
if args['--parent']:
parent_group = project.get_or_create_group(args['--parent'])
options = FileOptions(create_build_files=not args['--no-create-build-files'],
weak=args['--weak'],
ignore_unknown_type=args['--ignore-unknown-types'],
embed_framework=not args['--no-embed'],
code_sign_on_copy=args['--sign-on-copy'],
header_scope=header_scope.title())
build_files = None
if parent_group:
build_files = project.add_file(args['<path>'], tree=args['--tree'], force=False, target_name=args['--target'],
parent=parent_group, file_options=options)
else:
build_files = project.add_file(args['<path>'], tree=args['--tree'], force=False, target_name=args['--target'],
file_options=options)
# print some information about the build files created.
if build_files is None:
raise Exception('No files were added to the project.')
if not build_files:
return 'File added to the project, no build file sections created.'
info = {}
for build_file in build_files:
if build_file.isa not in info:
info[build_file.isa] = 0
info[build_file.isa] += 1
summary = 'File added to the project.'
for k in info:
summary += '\n{0} {1} sections created.'.format(info[k], k)
return summary
| def _add(project, args):
if '--header-scope' not in args or args['--header-scope'] not in ['public', 'private', 'project']:
header_scope = 'project'
else:
header_scope = args['--header-scope']
parent_group = None
if args['--parent']:
parent_group = project.get_or_create_group(args['--parent'])
options = FileOptions(create_build_files=not args['--no-create-build-files'],
weak=args['--weak'],
ignore_unknown_type=args['--ignore-unknown-types'],
embed_framework=not args['--no-embed'],
code_sign_on_copy=args['--sign-on-copy'],
header_scope=header_scope.title())
build_files = None
if parent_group:
build_files = project.add_file(args['<path>'], tree=args['--tree'], force=False, target_name=args['--target'],
parent=parent_group, file_options=options)
else:
build_files = project.add_file(args['<path>'], tree=args['--tree'], force=False, target_name=args['--target'],
file_options=options)
# print some information about the build files created.
if build_files is None:
raise Exception('No files were added to the project.')
if not build_files:
return 'File added to the project, no build file sections created.'
info = {}
for build_file in build_files:
if build_file.isa not in info:
info[build_file.isa] = 0
info[build_file.isa] += 1
summary = 'File added to the project.'
for k in info:
summary += '\n{0} {1} sections created.'.format(info[k], k)
return summary
|
6,747 | def initializeEnvironment(count: int, pid: object) -> Dict[str, str]:
"""
Create a copy of the process environment and add I{LISTEN_FDS} and
I{LISTEN_PID} (the environment variables set by systemd) to it.
"""
result = os.environ.copy()
result["LISTEN_FDS"] = str(count)
result["LISTEN_FDNAMES"] = ":".join([f"{n}.socket" for n in range(count)])
result["LISTEN_PID"] = str(pid)
return result
| def initializeEnvironment(count: int, pid: object) -> Dict[str, str]:
"""
Return a copy of the process environment with I{systemd} specific variables added to it.
"""
result = os.environ.copy()
result["LISTEN_FDS"] = str(count)
result["LISTEN_FDNAMES"] = ":".join([f"{n}.socket" for n in range(count)])
result["LISTEN_PID"] = str(pid)
return result
|
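For context on the pair above, the helper only adds the three systemd socket-activation variables on top of a copy of `os.environ`. An illustrative sketch of the entries `initializeEnvironment(2, 1234)` would add, with the values derived from the function body and shown as literals:

```python
count, pid = 2, 1234
listen_vars = {
    "LISTEN_FDS": str(count),                                         # "2"
    "LISTEN_FDNAMES": ":".join(f"{n}.socket" for n in range(count)),  # "0.socket:1.socket"
    "LISTEN_PID": str(pid),                                           # "1234"
}
print(listen_vars)
```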
7,584 | def helper_pv2s(f, unit_pv):
from astropy.units.si import radian
check_structured_unit(unit_pv, dt_pv)
ang_unit = radian * unit_pv[1] / unit_pv[0]
return [None], (radian, radian, unit_pv[0], ang_unit, ang_unit, unit_pv[1])
| def helper_pv2s(f, unit_pv):
from astropy.units.si import radian
check_structured_unit(unit_pv, dt_pv)
ang_unit = (radian * unit_pv[1] / unit_pv[0]).decompose()
return [None], (radian, radian, unit_pv[0], ang_unit, ang_unit, unit_pv[1])
|
29,768 | def create_app(debug=None):
""" Generate a Flask app for LB with all configurations done and connections established.
In the Flask app returned, blueprints are not registered.
"""
app = CustomFlask(
import_name=__name__,
use_flask_uuid=True,
)
load_config(app)
if debug is not None:
app.debug = debug
# As early as possible, if debug is True, set the log level of our 'listenbrainz' logger to DEBUG
# to prevent flask from creating a new log handler
if app.debug:
logger = logging.getLogger('listenbrainz')
logger.setLevel(logging.DEBUG)
# initialize Flask-DebugToolbar if the debug option is True
if app.debug and app.config['SECRET_KEY']:
app.init_debug_toolbar()
init_sentry(**app.config.get('LOG_SENTRY'))
# Initialize BU cache and metrics
cache.init(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'], namespace=app.config['REDIS_NAMESPACE'])
metrics.init("listenbrainz")
# Database connections
from listenbrainz import db
from listenbrainz.db import timescale as ts
from listenbrainz import messybrainz as msb
db.init_db_connection(app.config['SQLALCHEMY_DATABASE_URI'])
ts.init_db_connection(app.config['SQLALCHEMY_TIMESCALE_URI'])
msb.init_db_connection(app.config['MESSYBRAINZ_SQLALCHEMY_DATABASE_URI'])
# Redis connection
from listenbrainz.webserver.redis_connection import init_redis_connection
init_redis_connection(app.logger)
# Timescale connection
from listenbrainz.webserver.timescale_connection import init_timescale_connection
init_timescale_connection(app)
# RabbitMQ connection
from listenbrainz.webserver.rabbitmq_connection import init_rabbitmq_connection
try:
init_rabbitmq_connection(app)
except ConnectionError:
app.logger.critical("RabbitMQ service is not up!", exc_info=True)
if app.config['MB_DATABASE_URI']:
from brainzutils import musicbrainz_db
musicbrainz_db.init_db_engine(app.config['MB_DATABASE_URI'])
# OAuth
from listenbrainz.webserver.login import login_manager, provider
login_manager.init_app(app)
provider.init(app.config['MUSICBRAINZ_CLIENT_ID'],
app.config['MUSICBRAINZ_CLIENT_SECRET'])
# Error handling
from listenbrainz.webserver.errors import init_error_handlers
init_error_handlers(app)
from brainzutils.ratelimit import inject_x_rate_headers, set_user_validation_function
set_user_validation_function(check_ratelimit_token_whitelist)
@app.after_request
def after_request_callbacks(response):
return inject_x_rate_headers(response)
# Template utilities
app.jinja_env.add_extension('jinja2.ext.do')
from listenbrainz.webserver import utils
app.jinja_env.filters['date'] = utils.reformat_date
app.jinja_env.filters['datetime'] = utils.reformat_datetime
return app
| def create_app(debug=None):
""" Generate a Flask app for LB with all configurations done and connections established.
In the Flask app returned, blueprints are not registered.
"""
app = CustomFlask(
import_name=__name__,
use_flask_uuid=True,
)
load_config(app)
if debug is not None:
app.debug = debug
# As early as possible, if debug is True, set the log level of our 'listenbrainz' logger to DEBUG
# to prevent flask from creating a new log handler
if app.debug:
logger = logging.getLogger('listenbrainz')
logger.setLevel(logging.DEBUG)
# initialize Flask-DebugToolbar if the debug option is True
if app.debug and app.config['SECRET_KEY']:
app.init_debug_toolbar()
sentry.init_sentry(**app.config.get('LOG_SENTRY'))
# Initialize BU cache and metrics
cache.init(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'], namespace=app.config['REDIS_NAMESPACE'])
metrics.init("listenbrainz")
# Database connections
from listenbrainz import db
from listenbrainz.db import timescale as ts
from listenbrainz import messybrainz as msb
db.init_db_connection(app.config['SQLALCHEMY_DATABASE_URI'])
ts.init_db_connection(app.config['SQLALCHEMY_TIMESCALE_URI'])
msb.init_db_connection(app.config['MESSYBRAINZ_SQLALCHEMY_DATABASE_URI'])
# Redis connection
from listenbrainz.webserver.redis_connection import init_redis_connection
init_redis_connection(app.logger)
# Timescale connection
from listenbrainz.webserver.timescale_connection import init_timescale_connection
init_timescale_connection(app)
# RabbitMQ connection
from listenbrainz.webserver.rabbitmq_connection import init_rabbitmq_connection
try:
init_rabbitmq_connection(app)
except ConnectionError:
app.logger.critical("RabbitMQ service is not up!", exc_info=True)
if app.config['MB_DATABASE_URI']:
from brainzutils import musicbrainz_db
musicbrainz_db.init_db_engine(app.config['MB_DATABASE_URI'])
# OAuth
from listenbrainz.webserver.login import login_manager, provider
login_manager.init_app(app)
provider.init(app.config['MUSICBRAINZ_CLIENT_ID'],
app.config['MUSICBRAINZ_CLIENT_SECRET'])
# Error handling
from listenbrainz.webserver.errors import init_error_handlers
init_error_handlers(app)
from brainzutils.ratelimit import inject_x_rate_headers, set_user_validation_function
set_user_validation_function(check_ratelimit_token_whitelist)
@app.after_request
def after_request_callbacks(response):
return inject_x_rate_headers(response)
# Template utilities
app.jinja_env.add_extension('jinja2.ext.do')
from listenbrainz.webserver import utils
app.jinja_env.filters['date'] = utils.reformat_date
app.jinja_env.filters['datetime'] = utils.reformat_datetime
return app
|
30,414 | def display_time(seconds):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
result.append("{}{}".format(value, name))
return ' '.join(result)
| def display_time(seconds):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
result.append(f'{value}{name}')
return ' '.join(result)
|
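`display_time` above depends on a module-level `intervals` sequence of `(name, seconds)` pairs that is not part of the pair. A self-contained sketch with an assumed `intervals` definition:

```python
intervals = (('d', 86400), ('h', 3600), ('m', 60), ('s', 1))  # assumed; the real definition lives elsewhere

def display_time(seconds):
    # Greedily split a duration into the labelled units above, skipping zero-valued ones.
    result = []
    for name, count in intervals:
        value = seconds // count
        if value:
            seconds -= value * count
            result.append(f'{value}{name}')
    return ' '.join(result)

print(display_time(90061))  # 1d 1h 1m 1s
```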
23,652 | def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
    'airmass' is not provided, it is calculated using the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
| def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated usign the defaults in
:py:func:`~pvlib.atmosphere.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
|
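A hedged usage sketch for the function in the pair above, with made-up scalar irradiance values; the default isotropic model needs neither `dni_extra` nor `airmass`, and the call assumes pvlib is installed:

```python
import pvlib

total = pvlib.irradiance.get_total_irradiance(
    surface_tilt=30, surface_azimuth=180,
    solar_zenith=45, solar_azimuth=170,
    dni=800, ghi=600, dhi=100,
    model='isotropic',
)
# With scalar inputs this is an OrderedDict with the poa_* keys listed in the docstring.
print(total['poa_global'], total['poa_direct'], total['poa_sky_diffuse'])
```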
45,636 | def ncbi_gdp_to_list(file_location="", header_rows=1):
"""
Converts NCBI Genome Ideogram data to a Python list for use with
Ideogram.js
:param file_location: The location of the file you want to parse, using a relative path.
:param header_rows: The header rows you want to remove from your dataset.
:returns: A list containing the NCBI Genome Ideogram data, where each index
contains a row of the data set as a string.
"""
dataset_container = []
try:
with open(file_location) as tsv:
for line in csv.reader(tsv, delimiter="\t"):
row_string = ' '.join(str(row) for row in line)
dataset_container.append(row_string)
for x in range(header_rows):
del dataset_container[x]
return dataset_container
except Exception as e:
print(e)
| def ncbi_gdp_to_list(file_location="", header_rows=1):
"""
Convert NCBI Genome Ideogram data to a Python list for use with
Ideogram.js
:param file_location: The location of the file you want to parse, using a relative path.
:param header_rows: The header rows you want to remove from your dataset.
:returns: A list containing the NCBI Genome Ideogram data, where each index
contains a row of the data set as a string.
"""
dataset_container = []
try:
with open(file_location) as tsv:
for line in csv.reader(tsv, delimiter="\t"):
row_string = ' '.join(str(row) for row in line)
dataset_container.append(row_string)
for x in range(header_rows):
del dataset_container[x]
return dataset_container
except Exception as e:
print(e)
|
30,396 | def check_file_command():
file_hash_input = demisto.args().get('file')
file_hashes = file_hash_input.split(',')
for file_hash in file_hashes:
if len(file_hash) != 32 and len(file_hash) != 40 and len(file_hash) != 64:
return_error('Invalid input hash')
hash_type = 'md5'
if len(file_hash) == 40:
hash_type = 'sha1'
if len(file_hash) == 64:
hash_type = 'sha256'
raw_response = check_file(file_hash, hash_type)
data = raw_response.get('data')
if len(data) == 0:
return_outputs(
'No submission found in VMRay for hash : {}'.format(file_hash),
{},
)
for item in data:
entry = dict()
entry['SampleID'] = item.get('sample_id')
entry['FileName'] = item.get('sample_filename')
entry['MD5'] = item.get('sample_md5hash')
entry['SHA1'] = item.get('sample_sha1hash')
entry['SHA256'] = item.get('sample_sha256hash')
entry['SSDeep'] = item.get('sample_ssdeephash')
entry['Severity'] = SEVERITY_DICT.get(item.get('sample_severity'))
entry['Type'] = item.get('sample_type')
entry['Created'] = item.get('sample_created')
entry['Classification'] = item.get('sample_classifications')
scores = dbot_score_by_hash(entry)
entry_context = {
'VMRay.Sample(var.SampleID === obj.SampleID)': entry,
outputPaths.get('dbotscore'): scores,
}
human_readable = tableToMarkdown(
'Results for sample id: {} with severity {}'.format(
entry.get('SampleID'), entry.get('Severity')
),
entry,
headers=['Type', 'MD5', 'SHA1', 'SHA256', 'SSDeep'],
)
return_outputs(human_readable, entry_context, raw_response=raw_response)
| def check_file_command():
file_hash_input = demisto.args().get('file')
file_hashes = file_hash_input.split(',')
for file_hash in file_hashes:
if len(file_hash) != 32 and len(file_hash) != 40 and len(file_hash) != 64:
return_error('Invalid input hash')
hash_type = 'md5'
if len(file_hash) == 40:
hash_type = 'sha1'
if len(file_hash) == 64:
hash_type = 'sha256'
raw_response = check_file(file_hash, hash_type)
data = raw_response.get('data')
if not data:
return_outputs(
'No submission found in VMRay for hash : {}'.format(file_hash),
{},
)
for item in data:
entry = dict()
entry['SampleID'] = item.get('sample_id')
entry['FileName'] = item.get('sample_filename')
entry['MD5'] = item.get('sample_md5hash')
entry['SHA1'] = item.get('sample_sha1hash')
entry['SHA256'] = item.get('sample_sha256hash')
entry['SSDeep'] = item.get('sample_ssdeephash')
entry['Severity'] = SEVERITY_DICT.get(item.get('sample_severity'))
entry['Type'] = item.get('sample_type')
entry['Created'] = item.get('sample_created')
entry['Classification'] = item.get('sample_classifications')
scores = dbot_score_by_hash(entry)
entry_context = {
'VMRay.Sample(var.SampleID === obj.SampleID)': entry,
outputPaths.get('dbotscore'): scores,
}
human_readable = tableToMarkdown(
'Results for sample id: {} with severity {}'.format(
entry.get('SampleID'), entry.get('Severity')
),
entry,
headers=['Type', 'MD5', 'SHA1', 'SHA256', 'SSDeep'],
)
return_outputs(human_readable, entry_context, raw_response=raw_response)
|
38,931 | def deep_update(mapping: Dict[str, Any], updating_mapping: Dict[str, Any]) -> Dict[str, Any]:
for k, v in updating_mapping.items():
if (k in mapping) and isinstance(mapping[k], dict) and isinstance(v, dict):
deep_update(mapping[k], v)
else:
mapping[k] = v
return mapping
| def deep_update(mapping: Dict[str, Any], updating_mapping: Dict[str, Any]) -> Dict[str, Any]:
for k, v in updating_mapping.items():
if k in mapping and isinstance(mapping[k], dict) and isinstance(v, dict):
deep_update(mapping[k], v)
else:
mapping[k] = v
return mapping
|
58,787 | def conv_shape_func(attrs, inputs, _):
"""Shape function for conv*d op."""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
if attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
return [
_conv_shape_func_nchw(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
)
]
if attrs["data_layout"] == "NHWC":
if attrs["kernel_layout"] == "HWIO":
return [
_conv_shape_func_nhwc_hwio(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
)
]
if attrs["kernel_layout"] == "HWOI":
return [
_conv_shape_func_nhwc_hwoi(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
)
]
raise ValueError(
"Unsupported data/kernel layout: %s, %s" % (attrs["data_layout"], attrs["kernel_layout"])
)
| def conv_shape_func(attrs, inputs, _):
"""Shape function for conv*d op."""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
shape_func = None
if attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
shape_func = conv_shape_func_nchw
elif attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
shape_func = conv_shape_func_nhwc_hwio
elif attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWOI":
shape_func = conv_shape_func_nhwc_hwoi
else:
raise ValueError(
"Unsupported data/kernel layout: %s, %s" % (attrs["data_layout"], attrs["kernel_layout"])
)
return [shape_func(inputs[0], inputs[1], convert(strides), convert(padding), convert(dilation))]
|
53,888 | def _get_sun_angles(vis: xr.DataArray, lons: da.Array, lats: da.Array) -> tuple[xr.DataArray, xr.DataArray]:
suna = get_alt_az(vis.attrs['start_time'], lons, lats)[1]
suna = np.rad2deg(suna)
sunz = sun_zenith_angle(vis.attrs['start_time'], lons, lats)
return suna, sunz
| def _get_sun_angles(data_arr: xr.DataArray, lons: da.Array, lats: da.Array) -> tuple[xr.DataArray, xr.DataArray]:
suna = get_alt_az(data_arr.attrs['start_time'], lons, lats)[1]
suna = np.rad2deg(suna)
sunz = sun_zenith_angle(data_arr.attrs['start_time'], lons, lats)
return suna, sunz
|
36,599 | def _type_check(arg, msg, is_argument=True):
"""Check that the argument is a type, and return it (internal helper).
As a special case, accept None and return type(None) instead. Also wrap strings
into ForwardRef instances. Consider several corner cases, for example plain
special forms like Union are not valid, while Union[int, str] is OK, etc.
The msg argument is a human-readable error message, e.g::
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
invalid_generic_forms = (Generic, Protocol)
if is_argument:
invalid_generic_forms = invalid_generic_forms + (ClassVar, Final)
if arg is None:
return type(None)
if isinstance(arg, str):
return ForwardRef(arg)
if (isinstance(arg, _GenericAlias) and
arg.__origin__ in invalid_generic_forms):
raise TypeError(f"{arg} is not valid as type argument")
if arg in (Any, NoReturn):
return arg
if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
raise TypeError(f"Plain {arg} is not valid as type argument")
if isinstance(arg, (type, TypeVar, ForwardRef, types.Union, ParamSpec)):
return arg
if not callable(arg):
raise TypeError(f"{msg} Got {arg!r:.100}.")
if isinstance(arg, _ConcatenateGenericAlias):
raise TypeError(f"{arg} is not valid as a type argument "
"except in Callable.")
return arg
| def _type_check(arg, msg, is_argument=True):
"""Check that the argument is a type, and return it (internal helper).
As a special case, accept None and return type(None) instead. Also wrap strings
into ForwardRef instances. Consider several corner cases, for example plain
special forms like Union are not valid, while Union[int, str] is OK, etc.
The msg argument is a human-readable error message, e.g::
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
invalid_generic_forms = (Generic, Protocol)
if is_argument:
invalid_generic_forms = invalid_generic_forms + (ClassVar, Final)
if arg is None:
return type(None)
if isinstance(arg, str):
return ForwardRef(arg)
if (isinstance(arg, _GenericAlias) and
arg.__origin__ in invalid_generic_forms):
raise TypeError(f"{arg} is not valid as type argument")
if arg in (Any, NoReturn):
return arg
if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
raise TypeError(f"Plain {arg} is not valid as type argument")
if isinstance(arg, (type, TypeVar, ForwardRef, types.Union, ParamSpec)):
return arg
if not callable(arg):
raise TypeError(f"{msg} Got {arg!r:.100}.")
if isinstance(arg, _ConcatenateGenericAlias):
raise TypeError(f"Concatenate[...] is not valid as a type argument "
"except in Callable.")
return arg
|
3,357 | def get_unversioned_asset_url(module, key, cache_bust=False):
"""
Returns an asset URL that is unversioned. These assets should have a
`Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin
server before using their locally cached asset.
XXX(epurkhiser): As a temporary workaround for flakeyness with the CDN,
we're busting caches when version_bust is True using a query parameter with
the currently deployed backend SHA. This will have to change in the future
for frontend only deploys.
Example:
{% unversioned_asset_url 'sentry' 'sentry.css' %}
=> "/_static/dist/sentry/sentry.css"
{% unversioned_asset_url 'sentry' 'sentry.css' cache_bust=True %}
=> "/_static/dist/sentry/sentry.css?bust=xxx"
"""
args = (settings.STATIC_UNVERSIONED_URL.rstrip("/"), module, key.lstrip("/"))
if not cache_bust:
return "{}/{}/{}".format(*args)
return "{}/{}/{}?bust={}".format(*args, sentry.__build__)
| def get_unversioned_asset_url(module, key, cache_bust=False):
"""
Returns an asset URL that is unversioned. These assets should have a
`Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin
server before using their locally cached asset.
XXX(epurkhiser): As a temporary workaround for flakeyness with the CDN,
we're busting caches when version_bust is True using a query parameter with
the currently deployed backend SHA. This will have to change in the future
for frontend only deploys.
Example:
{% unversioned_asset_url 'sentry' 'sentry.css' %}
=> "/_static/dist/sentry/sentry.css"
{% unversioned_asset_url 'sentry' 'sentry.css' cache_bust=True %}
=> "/_static/dist/sentry/sentry.css?bust=xxx"
"""
args = (settings.STATIC_UNVERSIONED_URL.rstrip("/"), module, key.lstrip("/"))
if not cache_bust:
return "{}/{}/{}".format(*args)
return "{}/{}/{}?ver={}".format(*args, sentry.__build__)
|
31,941 | def complete_auth(client: DataExplorerClient) -> str:
"""
Start the authorization process.
Args:
client (DataExplorerClient): Azure Data Explorer API client.
Returns:
str: Message about completing the authorization process successfully.
"""
client.ms_client.get_access_token()
return 'Authorization completed successfully.'
| def complete_auth(client: DataExplorerClient) -> str:
"""
Start the authorization process.
Args:
client (DataExplorerClient): Azure Data Explorer API client.
Returns:
str: Message about completing the authorization process successfully.
"""
client.ms_client.get_access_token()
return '✅ Authorization completed successfully.'
|
41,506 | def test_tmu_tilde(caplog):
mu = 1.0
pdf = pyhf.simplemodels.hepdata_like([6], [9], [3])
data = [9] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
par_bounds[pdf.config.poi_index] = [-10, 10]
with caplog.at_level(logging.WARNING, 'pyhf.infer.test_statistics'):
pyhf.infer.test_statistics.tmu_tilde(mu, data, pdf, init_pars, par_bounds)
assert 'WARNING tmu tilde test statistic used for fit' in caplog.text
| def test_tmu_tilde(caplog):
mu = 1.0
pdf = pyhf.simplemodels.hepdata_like([6], [9], [3])
data = [9] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
par_bounds[pdf.config.poi_index] = [-10, 10]
with caplog.at_level(logging.WARNING, 'pyhf.infer.test_statistics'):
pyhf.infer.test_statistics.tmu_tilde(mu, data, pdf, init_pars, par_bounds)
assert 'WARNING tmu_tilde test statistic used for fit' in caplog.text
|
28,588 | def plot_bpv(
data,
kind="u_value",
t_stat="median",
bpv=True,
plot_mean=True,
reference="analytical",
mse=False,
n_ref=100,
hdi_prob=0.94,
color="C0",
grid=None,
figsize=None,
textsize=None,
labeller=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
ax=None,
backend=None,
plot_ref_kwargs=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Parameters
----------
data : az.InferenceData object
:class:`arviz.InferenceData` object containing the observed and
posterior/prior predictive data.
kind : str
Type of plot to display ("p_value", "u_value", "t_stat"). Defaults to u_value.
For "p_value" we compute p := p(y* ≤ y | y). This is the probability of the data y being
larger or equal than the predicted data y*. The ideal value is 0.5 (half the predictions
below and half above the data).
For "u_value" we compute pi := p(yi* ≤ yi | y). i.e. like a p_value but per observation yi.
This is also known as marginal p_value. The ideal distribution is uniform. This is similar
to the LOO-pit calculation/plot, the difference is than in LOO-pit plot we compute
pi = p(yi* r ≤ yi | y-i ), where y-i, is all other data except yi.
For "t_stat" we compute := p(T(y)* ≤ T(y) | y) where T is any T statistic. See t_stat
argument below for details of available options.
t_stat : str, float, or callable
T statistics to compute from the observations and predictive distributions. Allowed strings
are "mean", "median" or "std". Defaults to "median". Alternative a quantile can be passed
as a float (or str) in the interval (0, 1). Finally a user defined function is also
acepted, see examples section for details.
bpv : bool
If True (default) add the bayesian p_value to the legend when ``kind = t_stat``.
plot_mean : bool
Whether or not to plot the mean T statistic. Defaults to True.
reference : str
How to compute the distributions used as reference for u_values or p_values. Allowed values
are "analytical" (default) and "samples". Use `None` to do not plot any reference.
Defaults to "samples".
mse :bool
Show scaled mean square error between uniform distribution and marginal p_value
distribution. Defaults to False.
n_ref : int, optional
Number of reference distributions to sample when ``reference=samples``. Defaults to 100.
hdi_prob: float, optional
Probability for the highest density interval for the analytical reference distribution when
computing u_values. Should be in the interval (0, 1]. Defaults to
0.94.
color : str
Matplotlib color
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize : tuple
Figure size. If None it will be defined automatically.
textsize : float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on ``figsize``.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
var_names : list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the variables by ``~``
when you want to exclude them from the plot.
filter_vars : {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then ``flatten_pp=flatten``.
legend : bool
Add legend to figure. By default True.
ax : numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend : str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
plot_ref_kwargs : dict, optional
Extra keyword arguments to control how reference is represented.
Passed to :meth:`matplotlib.axes.Axes.plot` or
:meth:`matplotlib.axes.Axes.axhspan` (when ``kind=u_value``
and ``reference=analytical``).
backend_kwargs : bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_ppc : Plot for posterior/prior predictive checks.
plot_loo_pit : Plot Leave-One-Out probability integral transformation (PIT) predictive checks.
plot_dist_comparison : Plot to compare fitted and unfitted distributions.
References
----------
* Gelman et al. (2013) see http://www.stat.columbia.edu/~gelman/book/ pages 151-153 for details
Examples
--------
Plot Bayesian p_values.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="p_value")
Plot custom T statistic comparison.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="t_stat", t_stat=lambda x:np.percentile(x, q=50, axis=-1))
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}"')
if kind.lower() not in ("t_stat", "u_value", "p_value"):
raise TypeError("`kind` argument must be either `t_stat`, `u_value`, or `p_value`")
if reference is not None:
if reference.lower() not in ("analytical", "samples"):
raise TypeError(
"`reference` argument must be either `analytical`, `samples`, or `None`"
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
if data_pairs is None:
data_pairs = {}
if labeller is None:
labeller = BaseLabeller()
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_t_stats",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
bpvplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
total_pp_samples=total_pp_samples,
kind=kind,
bpv=bpv,
t_stat=t_stat,
reference=reference,
mse=mse,
n_ref=n_ref,
hdi_prob=hdi_prob,
plot_mean=plot_mean,
color=color,
figsize=figsize,
textsize=textsize,
labeller=labeller,
plot_ref_kwargs=plot_ref_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_bpv", "bpvplot", backend)
axes = plot(**bpvplot_kwargs)
return axes
| def plot_bpv(
data,
kind="u_value",
t_stat="median",
bpv=True,
plot_mean=True,
reference="analytical",
mse=False,
n_ref=100,
hdi_prob=0.94,
color="C0",
grid=None,
figsize=None,
textsize=None,
labeller=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
ax=None,
backend=None,
plot_ref_kwargs=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Parameters
----------
data : az.InferenceData object
:class:`arviz.InferenceData` object containing the observed and
posterior/prior predictive data.
kind : str
Type of plot to display ("p_value", "u_value", "t_stat"). Defaults to u_value.
For "p_value" we compute p := p(y* ≤ y | y). This is the probability of the data y being
larger or equal than the predicted data y*. The ideal value is 0.5 (half the predictions
below and half above the data).
For "u_value" we compute pi := p(yi* ≤ yi | y). i.e. like a p_value but per observation yi.
This is also known as marginal p_value. The ideal distribution is uniform. This is similar
to the LOO-pit calculation/plot, the difference is than in LOO-pit plot we compute
pi = p(yi* r ≤ yi | y-i ), where y-i, is all other data except yi.
For "t_stat" we compute := p(T(y)* ≤ T(y) | y) where T is any T statistic. See t_stat
argument below for details of available options.
t_stat : str, float, or callable
T statistics to compute from the observations and predictive distributions. Allowed strings
are "mean", "median" or "std". Defaults to "median". Alternative a quantile can be passed
as a float (or str) in the interval (0, 1). Finally a user defined function is also
acepted, see examples section for details.
bpv : bool
If True (default) add the Bayesian p_value to the legend when ``kind = t_stat``.
plot_mean : bool
Whether or not to plot the mean T statistic. Defaults to True.
reference : str
How to compute the distributions used as reference for u_values or p_values. Allowed values
are "analytical" (default) and "samples". Use `None` to do not plot any reference.
Defaults to "samples".
mse :bool
Show scaled mean square error between uniform distribution and marginal p_value
distribution. Defaults to False.
n_ref : int, optional
Number of reference distributions to sample when ``reference=samples``. Defaults to 100.
hdi_prob: float, optional
Probability for the highest density interval for the analytical reference distribution when
computing u_values. Should be in the interval (0, 1]. Defaults to
0.94.
color : str
Matplotlib color
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize : tuple
Figure size. If None it will be defined automatically.
textsize : float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on ``figsize``.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
var_names : list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the variables by ``~``
when you want to exclude them from the plot.
filter_vars : {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then ``flatten_pp=flatten``.
legend : bool
Add legend to figure. By default True.
ax : numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend : str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
plot_ref_kwargs : dict, optional
Extra keyword arguments to control how reference is represented.
Passed to :meth:`matplotlib.axes.Axes.plot` or
:meth:`matplotlib.axes.Axes.axhspan` (when ``kind=u_value``
and ``reference=analytical``).
backend_kwargs : bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_ppc : Plot for posterior/prior predictive checks.
plot_loo_pit : Plot Leave-One-Out probability integral transformation (PIT) predictive checks.
plot_dist_comparison : Plot to compare fitted and unfitted distributions.
References
----------
* Gelman et al. (2013) see http://www.stat.columbia.edu/~gelman/book/ pages 151-153 for details
Examples
--------
Plot Bayesian p_values.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="p_value")
Plot custom T statistic comparison.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="t_stat", t_stat=lambda x:np.percentile(x, q=50, axis=-1))
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}"')
if kind.lower() not in ("t_stat", "u_value", "p_value"):
raise TypeError("`kind` argument must be either `t_stat`, `u_value`, or `p_value`")
if reference is not None:
if reference.lower() not in ("analytical", "samples"):
raise TypeError(
"`reference` argument must be either `analytical`, `samples`, or `None`"
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
if data_pairs is None:
data_pairs = {}
if labeller is None:
labeller = BaseLabeller()
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_t_stats",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
bpvplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
total_pp_samples=total_pp_samples,
kind=kind,
bpv=bpv,
t_stat=t_stat,
reference=reference,
mse=mse,
n_ref=n_ref,
hdi_prob=hdi_prob,
plot_mean=plot_mean,
color=color,
figsize=figsize,
textsize=textsize,
labeller=labeller,
plot_ref_kwargs=plot_ref_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_bpv", "bpvplot", backend)
axes = plot(**bpvplot_kwargs)
return axes
|
1,608 | def plot_partial_dependence(estimator, X, features, feature_names=None,
target=None, response_method='auto', n_cols=3,
grid_resolution=100, percentiles=(0.05, 0.95),
method='auto', n_jobs=None, verbose=0, fig=None,
line_kw=None, contour_kw=None, ax=None):
"""Partial dependence plots.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour plots. The
deciles of the feature values will be shown with tick marks on the x-axes
for one-way plots, and on both axes for two-way plots.
.. note::
:func:`plot_partial_dependence` does not support using the same axes
with multiple calls. To plot the the partial dependence for multiple
estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import plot_partial_dependence
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> X, y = make_friedman1()
>>> est = LinearRegression().fit(X, y)
>>> disp1 = plot_partial_dependence(est, X) # doctest: +SKIP
>>> disp2 = plot_partial_dependence(est, X,
... ax=disp1.axes_) # doctest: +SKIP
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like or dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is 'brute'.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If features[i] is an int or a string, a one-way PDP is created; if
features[i] is a tuple, a two-way PDP is created. Each tuple must be
of size 2.
if any entry is a string, then it must be in ``feature_names``.
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; feature_names[i] holds the name of the feature
with index i.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
target : int, optional (default=None)
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : 'auto', 'predict_proba' or 'decision_function', \
optional (default='auto')
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is 'recursion', the response is always the output of
:term:`decision_function`.
n_cols : int, optional (default=3)
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, optional (default=100)
The number of equally spaced points on the axes of the plots, for each
target feature.
percentiles : tuple of float, optional (default=(0.05, 0.95))
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
method : str, optional (default='auto')
The method used to calculate the averaged predictions:
- 'recursion' is only supported for some tree-based estimators (namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`)
but is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities.
- 'brute' is supported for any estimator, but is more
computationally intensive.
- 'auto': the 'recursion' is used for estimators that support it,
and 'brute' is used otherwise.
Please see :ref:`this note <pdp_method_differences>` for
differences between the 'brute' and 'recursion 'method.
n_jobs : int, optional (default=None)
The number of CPUs to use to compute the partial dependences.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, optional (default=0)
Verbose output during PD computations.
fig : Matplotlib figure object, optional (default=None)
A figure object onto which the plots will be drawn, after the figure
has been cleared. By default, a new one is created.
.. deprecated:: 0.22
``fig`` will be removed in 0.24.
line_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
.. versionadded:: 0.22
Returns
-------
display: :class:`~sklearn.inspection.PartialDependenceDisplay`
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
See also
--------
sklearn.inspection.partial_dependence: Return raw partial
dependence values
"""
check_matplotlib_support('plot_partial_dependence') # noqa
import matplotlib.pyplot as plt # noqa
from matplotlib import transforms # noqa
from matplotlib.ticker import MaxNLocator # noqa
from matplotlib.ticker import ScalarFormatter # noqa
# set target_idx for multi-class estimators
if hasattr(estimator, 'classes_') and np.size(estimator.classes_) > 2:
if target is None:
raise ValueError('target must be specified for multi-class')
target_idx = np.searchsorted(estimator.classes_, target)
if (not (0 <= target_idx < len(estimator.classes_)) or
estimator.classes_[target_idx] != target):
raise ValueError('target not in est.classes_, got {}'.format(
target))
else:
# regression and binary classification
target_idx = 0
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not(hasattr(X, '__array__') or sparse.issparse(X)):
X = check_array(X, force_all_finite='allow-nan', dtype=np.object)
n_features = X.shape[1]
# convert feature_names to list
if feature_names is None:
if hasattr(X, "loc"):
# get the column names for a pandas dataframe
feature_names = X.columns.tolist()
else:
# define a list of numbered indices for a numpy array
feature_names = [str(i) for i in range(n_features)]
elif hasattr(feature_names, "tolist"):
# convert numpy array or pandas index to a list
feature_names = feature_names.tolist()
if len(set(feature_names)) != len(feature_names):
raise ValueError('feature_names should not contain duplicates.')
def convert_feature(fx):
if isinstance(fx, str):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return int(fx)
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral, str)):
fxs = (fxs,)
try:
fxs = tuple(convert_feature(fx) for fx in fxs)
except TypeError:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
if not 1 <= np.size(fxs) <= 2:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
tmp_features.append(fxs)
features = tmp_features
# Early exit if the axes does not have the correct number of axes
if ax is not None and not isinstance(ax, plt.Axes):
axes = np.asarray(ax, dtype=object)
if axes.size != len(features):
raise ValueError("Expected ax to have {} axes, got {}".format(
len(features), axes.size))
for i in chain.from_iterable(features):
if i >= len(feature_names):
raise ValueError('All entries of features must be less than '
'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i))
# compute averaged predictions
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(estimator, X, fxs,
response_method=response_method,
method=method,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# For multioutput regression, we can only check the validity of target
# now that we have the predictions.
# Also note: as multiclass-multioutput classifiers are not supported,
# multiclass and multioutput scenario are mutually exclusive. So there is
# no risk of overwriting target_idx here.
avg_preds, _ = pd_results[0] # checking the first result is enough
if is_regressor(estimator) and avg_preds.shape[0] > 1:
if target is None:
raise ValueError(
'target must be specified for multi-output regressors')
if not 0 <= target <= avg_preds.shape[0]:
raise ValueError(
'target must be in [0, n_tasks], got {}.'.format(target))
target_idx = target
# get global min and max average predictions of PD grouped by plot type
pdp_lim = {}
for avg_preds, values in pd_results:
min_pd = avg_preds[target_idx].min()
max_pd = avg_preds[target_idx].max()
n_fx = len(values)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
deciles = {}
for fx in chain.from_iterable(features):
if fx not in deciles:
X_col = _safe_indexing(X, fx, axis=1)
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
if fig is not None:
warnings.warn("The fig parameter is deprecated in version "
"0.22 and will be removed in version 0.24",
FutureWarning)
fig.clear()
ax = fig.gca()
display = PartialDependenceDisplay(pd_results, features, feature_names,
target_idx, pdp_lim, deciles)
return display.plot(ax=ax, n_cols=n_cols, line_kw=line_kw,
contour_kw=contour_kw)
| def plot_partial_dependence(estimator, X, features, feature_names=None,
target=None, response_method='auto', n_cols=3,
grid_resolution=100, percentiles=(0.05, 0.95),
method='auto', n_jobs=None, verbose=0, fig=None,
line_kw=None, contour_kw=None, ax=None):
"""Partial dependence plots.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour plots. The
deciles of the feature values will be shown with tick marks on the x-axes
for one-way plots, and on both axes for two-way plots.
.. note::
:func:`plot_partial_dependence` does not support using the same axes
with multiple calls. To plot the the partial dependence for multiple
estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import plot_partial_dependence
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> X, y = make_friedman1()
>>> est = LinearRegression().fit(X, y)
>>> disp1 = plot_partial_dependence(est, X) # doctest: +SKIP
>>> disp2 = plot_partial_dependence(est, X,
... ax=disp1.axes_) # doctest: +SKIP
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like or dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is 'brute'.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If features[i] is an int or a string, a one-way PDP is created; if
features[i] is a tuple, a two-way PDP is created. Each tuple must be
of size 2.
if any entry is a string, then it must be in ``feature_names``.
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; feature_names[i] holds the name of the feature
with index i.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
target : int, optional (default=None)
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : 'auto', 'predict_proba' or 'decision_function', \
optional (default='auto')
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is 'recursion', the response is always the output of
:term:`decision_function`.
n_cols : int, optional (default=3)
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, optional (default=100)
The number of equally spaced points on the axes of the plots, for each
target feature.
percentiles : tuple of float, optional (default=(0.05, 0.95))
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
method : str, optional (default='auto')
The method used to calculate the averaged predictions:
- 'recursion' is only supported for some tree-based estimators (namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`)
but is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities.
- 'brute' is supported for any estimator, but is more
computationally intensive.
- 'auto': the 'recursion' is used for estimators that support it,
and 'brute' is used otherwise.
Please see :ref:`this note <pdp_method_differences>` for
differences between the 'brute' and 'recursion' method.
n_jobs : int, optional (default=None)
The number of CPUs to use to compute the partial dependences.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, optional (default=0)
Verbose output during PD computations.
fig : Matplotlib figure object, optional (default=None)
A figure object onto which the plots will be drawn, after the figure
has been cleared. By default, a new one is created.
.. deprecated:: 0.22
``fig`` will be removed in 0.24.
line_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
.. versionadded:: 0.22
Returns
-------
display: :class:`~sklearn.inspection.PartialDependenceDisplay`
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
See also
--------
sklearn.inspection.partial_dependence: Return raw partial
dependence values
"""
check_matplotlib_support('plot_partial_dependence') # noqa
import matplotlib.pyplot as plt # noqa
from matplotlib import transforms # noqa
from matplotlib.ticker import MaxNLocator # noqa
from matplotlib.ticker import ScalarFormatter # noqa
# set target_idx for multi-class estimators
if hasattr(estimator, 'classes_') and np.size(estimator.classes_) > 2:
if target is None:
raise ValueError('target must be specified for multi-class')
target_idx = np.searchsorted(estimator.classes_, target)
if (not (0 <= target_idx < len(estimator.classes_)) or
estimator.classes_[target_idx] != target):
raise ValueError('target not in est.classes_, got {}'.format(
target))
else:
# regression and binary classification
target_idx = 0
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not(hasattr(X, '__array__') or sparse.issparse(X)):
X = check_array(X, force_all_finite='allow-nan', dtype=np.object)
n_features = X.shape[1]
# convert feature_names to list
if feature_names is None:
if hasattr(X, "loc"):
# get the column names for a pandas dataframe
feature_names = X.columns.tolist()
else:
# define a list of numbered indices for a numpy array
feature_names = [str(i) for i in range(n_features)]
elif hasattr(feature_names, "tolist"):
# convert numpy array or pandas index to a list
feature_names = feature_names.tolist()
if len(set(feature_names)) != len(feature_names):
raise ValueError('feature_names should not contain duplicates.')
def convert_feature(fx):
if isinstance(fx, str):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return int(fx)
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral, str)):
fxs = (fxs,)
try:
fxs = tuple(convert_feature(fx) for fx in fxs)
except TypeError:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
if not 1 <= np.size(fxs) <= 2:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
tmp_features.append(fxs)
features = tmp_features
# Early exit if the axes does not have the correct number of axes
if ax is not None and not isinstance(ax, plt.Axes):
axes = np.asarray(ax, dtype=object)
if axes.size != len(features):
raise ValueError("Expected ax to have {} axes, got {}".format(
len(features), axes.size))
for i in chain.from_iterable(features):
if i >= len(feature_names):
raise ValueError('All entries of features must be less than '
'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i))
# compute averaged predictions
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(estimator, X, fxs,
response_method=response_method,
method=method,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# For multioutput regression, we can only check the validity of target
# now that we have the predictions.
# Also note: as multiclass-multioutput classifiers are not supported,
# multiclass and multioutput scenario are mutually exclusive. So there is
# no risk of overwriting target_idx here.
avg_preds, _ = pd_results[0] # checking the first result is enough
if is_regressor(estimator) and avg_preds.shape[0] > 1:
if target is None:
raise ValueError(
'target must be specified for multi-output regressors')
if not 0 <= target <= avg_preds.shape[0]:
raise ValueError(
'target must be in [0, n_tasks], got {}.'.format(target))
target_idx = target
# get global min and max average predictions of PD grouped by plot type
pdp_lim = {}
for avg_preds, values in pd_results:
min_pd = avg_preds[target_idx].min()
max_pd = avg_preds[target_idx].max()
n_fx = len(values)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
deciles = {}
for fx in chain.from_iterable(features):
if fx not in deciles:
X_col = _safe_indexing(X, fx, axis=1)
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
if fig is not None:
warnings.warn("The fig parameter is deprecated in version "
"0.22 and will be removed in version 0.24",
FutureWarning)
fig.clear()
ax = fig.gca()
display = PartialDependenceDisplay(pd_results, features, feature_names,
target_idx, pdp_lim, deciles)
return display.plot(ax=ax, n_cols=n_cols, line_kw=line_kw,
contour_kw=contour_kw)
|
27,982 | def remove_report_from_plist(plist_file_obj, skip_handler):
"""
Parse the original plist content provided by the analyzer
and return a new plist content where reports were removed
if they should be skipped. If the remove failed for some reason None
will be returned.
WARN !!!!
If the 'files' array in the plist is modified all of the
diagnostic section (control, event ...) nodes should be
re indexed to use the proper file array indexes!!!
"""
report_data = None
try:
report_data = parse_plist(plist_file_obj)
except plistlib.InvalidFileException as ifex:
LOG.warning('Invalid plist file')
return None
except (ExpatError, TypeError, AttributeError) as ex:
LOG.error("Failed to parse plist content, "
"keeping the original version")
LOG.error(ex)
return None
file_ids_to_remove = []
try:
for i, f in enumerate(report_data['files']):
if skip_handler.should_skip(f):
file_ids_to_remove.append(i)
kept_diagnostics, kept_files = get_kept_report_data(report_data,
file_ids_to_remove)
report_data['diagnostics'] = kept_diagnostics
report_data['files'] = kept_files
return plistlib.dumps(report_data)
except KeyError:
LOG.error("Failed to modify plist content, "
"keeping the original version")
return None
| def remove_report_from_plist(plist_file_obj, skip_handler):
"""
Parse the original plist content provided by the analyzer
and return a new plist content where reports were removed
if they should be skipped. If the remove failed for some reason None
will be returned.
WARN !!!!
If the 'files' array in the plist is modified all of the
diagnostic section (control, event ...) nodes should be
re indexed to use the proper file array indexes!!!
"""
report_data = None
try:
report_data = parse_plist(plist_file_obj)
except plistlib.InvalidFileException as ifex:
LOG.warning('Invalid plist file')
return
except (ExpatError, TypeError, AttributeError) as ex:
LOG.error("Failed to parse plist content, "
"keeping the original version")
LOG.error(ex)
return None
file_ids_to_remove = []
try:
for i, f in enumerate(report_data['files']):
if skip_handler.should_skip(f):
file_ids_to_remove.append(i)
kept_diagnostics, kept_files = get_kept_report_data(report_data,
file_ids_to_remove)
report_data['diagnostics'] = kept_diagnostics
report_data['files'] = kept_files
return plistlib.dumps(report_data)
except KeyError:
LOG.error("Failed to modify plist content, "
"keeping the original version")
return None
|
57,610 | def fetch_main_power_df(
zone_key=None,
sorted_zone_keys=None,
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
) -> Union[Tuple[pd.DataFrame, pd.Series], pd.DataFrame]:
df, region, filtered_datasets = _fetch_main_df(
"power",
zone_key=zone_key,
sorted_zone_keys=sorted_zone_keys,
session=session,
target_datetime=target_datetime,
logger=logger,
)
# Solar rooftop is a special case
df = process_solar_rooftop(df)
logger.debug("Preparing capacities..")
if region:
capacities = get_capacities(filtered_datasets, region)
return df, capacities
else:
return df
| def fetch_main_power_df(
zone_key=None,
sorted_zone_keys=None,
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
) -> Union[Tuple[pd.DataFrame, pd.Series], pd.DataFrame]:
df, region, filtered_datasets = _fetch_main_df(
"power",
zone_key=zone_key,
sorted_zone_keys=sorted_zone_keys,
session=session,
target_datetime=target_datetime,
logger=logger,
)
# Solar rooftop is a special case
df = process_solar_rooftop(df)
logger.debug("Preparing capacities..")
if region:
capacities = get_capacities(filtered_datasets, region)
return df, capacities
else:
return df, None
|