id | original | modified
---|---|---
32,812 |
def get_request_uri(request):
"""
Helper to rebuild the original request url
query string or fragments are not included.
"""
# DEV: Use django.http.request.HttpRequest._get_raw_host() when available
# otherwise back-off to PEP 333 as done in django 1.8.x
if hasattr(request, "_get_raw_host"):
host = request._get_raw_host()
else:
try:
# Try to build host how Django would have
# https://github.com/django/django/blob/e8d0d2a5efc8012dcc8bf1809dec065ebde64c81/django/http/request.py#L85-L102
if "HTTP_HOST" in request.META:
host = request.META["HTTP_HOST"]
else:
host = request.META["SERVER_NAME"]
port = str(request.META["SERVER_PORT"])
if port != ("443" if request.is_secure() else "80"):
host = "{0}:{1}".format(host, port)
except Exception:
# This really shouldn't ever happen, but let's guard here just in case
log.debug("Failed to build Django request host", exc_info=True)
host = "unknown"
# If request scheme is missing, possible in case where wsgi.url_scheme
# environ has not been set, then default to http
scheme = request.scheme or "http"
# Build request url from the information available
# DEV: We are explicitly omitting query strings since they may contain sensitive information
urlparts = dict(scheme=scheme, netloc=host, path=request.path, params=None, query=None, fragment=None)
# If any url part is a SimpleLazyObject, use it's __class__ property to cast
# str/bytes and allow for _setup() to execute
for (k, v) in urlparts.items():
if isinstance(v, SimpleLazyObject):
if v.__class__ == str:
v = str(v)
elif v.__class__ == bytes:
v = bytes(v)
urlparts[k] = v
# DEV: With PY3 urlunparse calls urllib.parse._coerce_args which uses the
# type of the scheme to check the type to expect from all url parts, raising
# a TypeError otherwise. If the scheme is not a str, the function returns
# the url parts bytes decoded along with a function to encode the result of
# combining the url parts. We return a byte string when all url parts are
# byte strings.
# https://github.com/python/cpython/blob/02d126aa09d96d03dcf9c5b51c858ce5ef386601/Lib/urllib/parse.py#L111-L125
if PY3 and not all(isinstance(value, binary_type) or value is None for value in urlparts.values()):
for (key, value) in urlparts.items():
if value is not None and isinstance(value, binary_type):
urlparts[key] = to_unicode(value)
return parse.urlunparse(parse.ParseResult(**urlparts))
|
def get_request_uri(request):
"""
Helper to rebuild the original request url
query string or fragments are not included.
"""
# DEV: Use django.http.request.HttpRequest._get_raw_host() when available
# otherwise back-off to PEP 333 as done in django 1.8.x
if hasattr(request, "_get_raw_host"):
host = request._get_raw_host()
else:
try:
# Try to build host how Django would have
# https://github.com/django/django/blob/e8d0d2a5efc8012dcc8bf1809dec065ebde64c81/django/http/request.py#L85-L102
if "HTTP_HOST" in request.META:
host = request.META["HTTP_HOST"]
else:
host = request.META["SERVER_NAME"]
port = str(request.META["SERVER_PORT"])
if port != ("443" if request.is_secure() else "80"):
host = "{0}:{1}".format(host, port)
except Exception:
# This really shouldn't ever happen, but let's guard here just in case
log.debug("Failed to build Django request host", exc_info=True)
host = "unknown"
# If request scheme is missing, possible in case where wsgi.url_scheme
# environ has not been set, then default to http
scheme = request.scheme or "http"
# Build request url from the information available
# DEV: We are explicitly omitting query strings since they may contain sensitive information
urlparts = dict(scheme=scheme, netloc=host, path=request.path, params=None, query=None, fragment=None)
# If any url part is a SimpleLazyObject, use its __class__ property to cast
# str/bytes and allow for _setup() to execute
for (k, v) in urlparts.items():
if isinstance(v, SimpleLazyObject):
if v.__class__ == str:
v = str(v)
elif v.__class__ == bytes:
v = bytes(v)
urlparts[k] = v
# DEV: With PY3 urlunparse calls urllib.parse._coerce_args which uses the
# type of the scheme to check the type to expect from all url parts, raising
# a TypeError otherwise. If the scheme is not a str, the function returns
# the url parts bytes decoded along with a function to encode the result of
# combining the url parts. We return a byte string when all url parts are
# byte strings.
# https://github.com/python/cpython/blob/02d126aa09d96d03dcf9c5b51c858ce5ef386601/Lib/urllib/parse.py#L111-L125
if PY3 and not all(isinstance(value, binary_type) or value is None for value in urlparts.values()):
for (key, value) in urlparts.items():
if value is not None and isinstance(value, binary_type):
urlparts[key] = to_unicode(value)
return parse.urlunparse(parse.ParseResult(**urlparts))
|
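The pair above rebuilds a request URL from scheme, host, and path while deliberately omitting the query string and fragment. Below is a minimal standalone sketch of that core step using only urllib.parse; the helper name and sample values are illustrative, not taken from the snippet.

```python
from urllib.parse import ParseResult, urlunparse

def rebuild_without_query(scheme, host, path):
    # params/query/fragment are left empty on purpose, mirroring the
    # "omit sensitive query strings" intent of get_request_uri above.
    return urlunparse(ParseResult(scheme=scheme, netloc=host, path=path,
                                  params="", query="", fragment=""))

print(rebuild_without_query("https", "example.com:8443", "/orders/42"))
# -> https://example.com:8443/orders/42
```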
13,338 |
def volume_lock(request, object_id):
volume = models.Volume.objects.get(id=object_id)
assert(volume.vol_encrypt > 0)
if request.method == "POST":
_n = notifier()
if '__confirm' not in request.POST and not _n.is_freenas() and _n.failover_licensed():
remaining_volumes = [v for v in models.Volume.objects.exclude(pk=object_id) if v.is_decrypted()]
if not remaining_volumes:
message = render_to_string('freeadmin/generic_model_confirm.html', {
'message': 'Warning: Locking this volume will prevent failover from functioning correctly.<br />Do you want to continue?',
})
return JsonResp(request, confirm=message)
notifier().volume_detach(volume)
if hasattr(notifier, 'failover_status') and notifier().failover_status() == 'MASTER':
from freenasUI.failover.enc_helper import LocalEscrowCtl
escrowctl = LocalEscrowCtl()
escrowctl.clear()
try:
os.unlink('/tmp/.failover_master')
except Exception:
pass
try:
with client as c:
c.call('failover.call_remote', 'failover.encryption_clearkey')
except Exception:
log.warn('Failed to clear key on standby node, is it down?', exc_info=True)
notifier().restart("system_datasets")
return JsonResp(request, message=_("Volume locked"))
with client as c:
sys_dataset = c.call('systemdataset.config')
if volume.vol_name == sys_dataset['pool']:
return render(
request,
'freeadmin/generic_model_dialog.html', {
'msg': 'Pool contains the system dataset and cannot be locked. Please select a different pool '
'or configure the system dataset to be on a different pool.'
}
)
return render(request, "storage/lock.html")
|
def volume_lock(request, object_id):
volume = models.Volume.objects.get(id=object_id)
assert(volume.vol_encrypt > 0)
if request.method == "POST":
_n = notifier()
if '__confirm' not in request.POST and not _n.is_freenas() and _n.failover_licensed():
remaining_volumes = [v for v in models.Volume.objects.exclude(pk=object_id) if v.is_decrypted()]
if not remaining_volumes:
message = render_to_string('freeadmin/generic_model_confirm.html', {
'message': 'Warning: Locking this volume will prevent failover from functioning correctly.<br />Do you want to continue?',
})
return JsonResp(request, confirm=message)
notifier().volume_detach(volume)
if hasattr(notifier, 'failover_status') and notifier().failover_status() == 'MASTER':
from freenasUI.failover.enc_helper import LocalEscrowCtl
escrowctl = LocalEscrowCtl()
escrowctl.clear()
try:
os.unlink('/tmp/.failover_master')
except Exception:
pass
try:
with client as c:
c.call('failover.call_remote', 'failover.encryption_clearkey')
except Exception:
log.warn('Failed to clear key on standby node, is it down?', exc_info=True)
notifier().restart("system_datasets")
return JsonResp(request, message=_("Volume locked"))
with client as c:
sys_dataset = c.call('systemdataset.config')
if volume.vol_name == sys_dataset['pool']:
return render(
request,
'freeadmin/generic_model_dialog.html', {
'msg': 'Pool contains the system dataset and cannot be locked. Please select a different pool '
'or configure the system dataset to a pool that has no passphrase.'
}
)
return render(request, "storage/lock.html")
|
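The view above uses a two-step confirmation: a POST without `__confirm` gets a warning dialog back, and only a repeated POST that includes `__confirm` actually performs the lock. A hypothetical, framework-free sketch of that flow (the function and argument names below are made up for illustration):

```python
def lock_volume(post_data, failover_at_risk, do_lock):
    # First pass: no confirmation token yet, so only return a warning.
    if "__confirm" not in post_data and failover_at_risk:
        return {"confirm": "Locking this volume will prevent failover. Continue?"}
    # Second pass (or no warning needed): perform the destructive action.
    do_lock()
    return {"message": "Volume locked"}

print(lock_volume({}, True, lambda: None))                  # asks to confirm
print(lock_volume({"__confirm": "1"}, True, lambda: None))  # proceeds
```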
32,927 |
def patch():
if getattr(graphql, "_datadog_patch", False):
return
elif graphql_version < (1, 1):
return
setattr(graphql, "_datadog_patch", True)
pin = Pin()
_w(graphql, "graphql", _traced_graphql)
_w(graphql, "graphql_sync", _traced_graphql_sync)
pin.onto(graphql)
|
def patch():
if getattr(graphql, "_datadog_patch", False) or graphql_version < (1, 1):
return
setattr(graphql, "_datadog_patch", True)
pin = Pin()
_w(graphql, "graphql", _traced_graphql)
_w(graphql, "graphql_sync", _traced_graphql_sync)
pin.onto(graphql)
|
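Both versions above guard against double-patching by checking a module-level flag before wrapping anything. A minimal sketch of that idempotent-patch pattern with a stand-in module (no ddtrace or wrapt involved):

```python
import types

mod = types.SimpleNamespace(greet=lambda name: f"hello {name}")

def patch():
    if getattr(mod, "_patched", False):
        return                      # already wrapped: do nothing
    mod._patched = True
    original = mod.greet
    mod.greet = lambda name: original(name).upper()  # the "wrapper"

patch()
patch()                             # second call is a no-op thanks to the flag
print(mod.greet("ada"))             # HELLO ADA
```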
42,395 |
def plot_bands(
arr, cmap="Greys_r", figsize=(12, 12), cols=3, title=None, extent=None
):
"""Plot each band in a numpy array in its own axis.
Assumes band order (band, row, col).
Parameters
----------
arr : numpy array
An n-dimensional numpy array to plot.
cmap : str (default = "Greys_r")
Colormap name for plots.
figsize : tuple (default = (12, 12))
Figure size in inches.
cols : int (default = 3)
Number of columns for plot grid.
title : str or list (optional)
Title of one band or list of titles with one title per band.
extent : tuple (optional)
Bounding box that the data will fill: (minx, miny, maxx, maxy).
Returns
----------
tuple
fig : figure object
The figure of the plotted band(s).
ax or axs : axes object(s)
The axes object(s) associated with the plot.
Example
-------
.. plot::
>>> import matplotlib.pyplot as plt
>>> import earthpy.plot as ep
>>> from earthpy.io import path_to_example
>>> import rasterio as rio
>>> titles = ['Red', 'Green', 'Blue']
>>> with rio.open(path_to_example('rmnp-rgb.tif')) as src:
... ep.plot_bands(src.read(),
... title=titles,
... figsize=(8, 3))
(<Figure size ... with 3 Axes>, ...)
"""
try:
arr.ndim
except AttributeError:
"Input arr should be a numpy array"
if title:
if isinstance(title, str):
title = [title]
# A 2-dim array should only be passed one title
if (arr.ndim == 2) and (len(title) > 1):
raise ValueError(
"""Plot_bands() expects one title for a single
band array. You have provided more than one
title."""
)
# A 3 dim array should have the same number of titles as dims
if arr.ndim > 2:
if not (len(title) == arr.shape[0]):
raise ValueError(
"""Plot_bands() expects the number of plot titles
to equal the number of array raster layers."""
)
# If the array is 3 dimensional setup grid plotting
if arr.ndim > 2 and arr.shape[0] > 1:
# Calculate the total rows that will be required to plot each band
plot_rows = int(np.ceil(arr.shape[0] / cols))
total_layers = arr.shape[0]
# Plot all bands
fig, axs = plt.subplots(plot_rows, cols, figsize=figsize)
axs_ravel = axs.ravel()
for ax, i in zip(axs_ravel, range(total_layers)):
band = i + 1
ax.imshow(es.bytescale(arr[i]), cmap=cmap)
if title:
ax.set(title=title[i])
else:
ax.set(title="Band %i" % band)
ax.set(xticks=[], yticks=[])
# This loop clears out the plots for axes which are empty
# A matplotlib axis grid is always uniform with x cols and x rows
# eg: an 8 band plot with 3 cols will always be 3 x 3
for ax in axs_ravel[total_layers:]:
ax.set_axis_off()
ax.set(xticks=[], yticks=[])
plt.tight_layout()
return fig, axs
elif arr.ndim == 2 or arr.shape[0] == 1:
# If it's a 2 dimensional array with a 3rd dimension
arr = np.squeeze(arr)
fig, ax = plt.subplots(figsize=figsize)
ax.imshow(es.bytescale(arr), cmap=cmap, extent=extent)
if title:
ax.set(title=title)
ax.set(xticks=[], yticks=[])
return fig, ax
|
def plot_bands(
arr, cmap="Greys_r", figsize=(12, 12), cols=3, title=None, extent=None
):
"""Plot each band in a numpy array in its own axis.
Assumes band order (band, row, col).
Parameters
----------
arr : numpy array
An n-dimensional numpy array to plot.
cmap : str (default = "Greys_r")
Colormap name for plots.
figsize : tuple (default = (12, 12))
Figure size in inches.
cols : int (default = 3)
Number of columns for plot grid.
title : str or list (optional)
Title of one band or list of titles with one title per band.
extent : tuple (optional)
Bounding box that the data will fill: (minx, miny, maxx, maxy).
Returns
----------
tuple
fig : figure object
The figure of the plotted band(s).
ax or axs : axes object(s)
The axes object(s) associated with the plot.
Example
-------
.. plot::
>>> import matplotlib.pyplot as plt
>>> import earthpy.plot as ep
>>> from earthpy.io import path_to_example
>>> import rasterio as rio
>>> titles = ['Red', 'Green', 'Blue']
>>> with rio.open(path_to_example('rmnp-rgb.tif')) as src:
... ep.plot_bands(src.read(),
... title=titles,
... figsize=(8, 3))
(<Figure size ... with 3 Axes>, ...)
"""
try:
arr.ndim
except AttributeError:
"Input arr should be a numpy array"
if title:
if isinstance(title, str):
title = [title]
# A 2-dim array should only be passed one title
if arr.ndim == 2 and len(title) > 1:
raise ValueError(
"""Plot_bands() expects one title for a single
band array. You have provided more than one
title."""
)
# A 3 dim array should have the same number of titles as dims
if arr.ndim > 2:
if not (len(title) == arr.shape[0]):
raise ValueError(
"""Plot_bands() expects the number of plot titles
to equal the number of array raster layers."""
)
# If the array is 3 dimensional setup grid plotting
if arr.ndim > 2 and arr.shape[0] > 1:
# Calculate the total rows that will be required to plot each band
plot_rows = int(np.ceil(arr.shape[0] / cols))
total_layers = arr.shape[0]
# Plot all bands
fig, axs = plt.subplots(plot_rows, cols, figsize=figsize)
axs_ravel = axs.ravel()
for ax, i in zip(axs_ravel, range(total_layers)):
band = i + 1
ax.imshow(es.bytescale(arr[i]), cmap=cmap)
if title:
ax.set(title=title[i])
else:
ax.set(title="Band %i" % band)
ax.set(xticks=[], yticks=[])
# This loop clears out the plots for axes which are empty
# A matplotlib axis grid is always uniform with x cols and x rows
# eg: an 8 band plot with 3 cols will always be 3 x 3
for ax in axs_ravel[total_layers:]:
ax.set_axis_off()
ax.set(xticks=[], yticks=[])
plt.tight_layout()
return fig, axs
elif arr.ndim == 2 or arr.shape[0] == 1:
# If it's a 2 dimensional array with a 3rd dimension
arr = np.squeeze(arr)
fig, ax = plt.subplots(figsize=figsize)
ax.imshow(es.bytescale(arr), cmap=cmap, extent=extent)
if title:
ax.set(title=title)
ax.set(xticks=[], yticks=[])
return fig, ax
|
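The grid logic in plot_bands computes ceil(n_bands / cols) subplot rows and switches off the leftover axes. A small self-contained sketch of just that layout step, using random data and the Agg backend so it runs headless:

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")               # headless backend for the sketch
import matplotlib.pyplot as plt

arr = np.random.rand(5, 10, 10)     # 5 bands of 10x10 pixels
cols = 3
rows = int(np.ceil(arr.shape[0] / cols))   # -> 2 rows for 5 bands
fig, axs = plt.subplots(rows, cols, figsize=(9, 6))
for ax, band in zip(axs.ravel(), arr):
    ax.imshow(band, cmap="Greys_r")
    ax.set(xticks=[], yticks=[])
for ax in axs.ravel()[arr.shape[0]:]:      # hide the unused 6th axis
    ax.set_axis_off()
```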
21,647 |
def _setup_jemalloc_stats():
"""Checks to see if jemalloc is loaded, and hooks up a collector to record
statistics exposed by jemalloc.
"""
# Try to find the loaded jemalloc shared library, if any. We need to
# introspect into what is loaded, rather than loading whatever is on the
# path, as if we load a *different* jemalloc version things will seg fault.
pid = os.getpid()
# We're looking for a path at the end of the line that includes
# "libjemalloc".
regex = re.compile(r"/\S+/libjemalloc.*$")
jemalloc_path = None
with open(f"/proc/{pid}/maps") as f:
for line in f.readlines():
match = regex.search(line.strip())
if match:
jemalloc_path = match.group()
if not jemalloc_path:
# No loaded jemalloc was found.
return
jemalloc = ctypes.CDLL(jemalloc_path)
def _mallctl(
name: str, read: bool = True, write: Optional[int] = None
) -> Optional[int]:
"""Wrapper around `mallctl` for reading and writing integers to
jemalloc.
Args:
name: The name of the option to read from/write to.
read: Whether to try and read the value.
write: The value to write, if given.
Returns:
The value read if `read` is True, otherwise None.
Raises:
An exception if `mallctl` returns a non-zero error code.
"""
input_var = None
input_var_ref = None
input_len_ref = None
if read:
input_var = ctypes.c_size_t(0)
input_len = ctypes.c_size_t(ctypes.sizeof(input_var))
input_var_ref = ctypes.byref(input_var)
input_len_ref = ctypes.byref(input_len)
write_var_ref = None
write_len = ctypes.c_size_t(0)
if write is not None:
write_var = ctypes.c_size_t(write)
write_len = ctypes.c_size_t(ctypes.sizeof(write_var))
write_var_ref = ctypes.byref(write_var)
# The interface is:
#
# int mallctl(
# const char *name,
# void *oldp,
# size_t *oldlenp,
# void *newp,
# size_t newlen
# )
#
# Where oldp/oldlenp is a buffer where the old value will be written to
# (if not null), and newp/newlen is the buffer with the new value to set
# (if not null). Note that they're all references *except* newlen.
result = jemalloc.mallctl(
name.encode("ascii"),
input_var_ref,
input_len_ref,
write_var_ref,
write_len,
)
if result != 0:
raise Exception("Failed to call mallctl")
if input_var is None:
return None
return input_var.value
def _jemalloc_refresh_stats() -> None:
"""Request that jemalloc updates its internal statistics. This needs to
be called before querying for stats, otherwise it will return stale
values.
"""
try:
_mallctl("epoch", read=False, write=1)
except Exception:
pass
class JemallocCollector:
"""Metrics for internal jemalloc stats."""
def collect(self):
_jemalloc_refresh_stats()
g = GaugeMetricFamily(
"jemalloc_stats_app_memory",
"The stats reported by jemalloc",
labels=["type"],
)
# Read the relevant global stats from jemalloc. Note that these may
# not be accurate if python is configured to use its internal small
# object allocator (which is on by default, disable by setting the
# env `PYTHONMALLOC=malloc`).
#
# See the jemalloc manpage for details about what each value means,
# roughly:
# - allocated ─ Total number of bytes allocated by the app
# - active ─ Total number of bytes in active pages allocated by
# the application, this is bigger than `allocated`.
# - resident ─ Maximum number of bytes in physically resident data
# pages mapped by the allocator, comprising all pages dedicated
# to allocator metadata, pages backing active allocations, and
# unused dirty pages. This is bigger than `active`.
# - mapped ─ Total number of bytes in active extents mapped by the
# allocator.
# - metadata ─ Total number of bytes dedicated to jemalloc
# metadata.
for t in (
"allocated",
"active",
"resident",
"mapped",
"metadata",
):
try:
value = _mallctl(f"stats.{t}")
except Exception:
# There was an error fetching the value, skip.
continue
g.add_metric([t], value=value)
yield g
REGISTRY.register(JemallocCollector())
|
def _setup_jemalloc_stats():
"""Checks to see if jemalloc is loaded, and hooks up a collector to record
statistics exposed by jemalloc.
"""
# Try to find the loaded jemalloc shared library, if any. We need to
# introspect into what is loaded, rather than loading whatever is on the
# path, as if we load a *different* jemalloc version things will seg fault.
pid = os.getpid()
# We're looking for a path at the end of the line that includes
# "libjemalloc".
regex = re.compile(r"/\S+/libjemalloc.*$")
jemalloc_path = None
with open(f"/proc/{pid}/maps") as f:
for line in f:
match = regex.search(line.strip())
if match:
jemalloc_path = match.group()
if not jemalloc_path:
# No loaded jemalloc was found.
return
jemalloc = ctypes.CDLL(jemalloc_path)
def _mallctl(
name: str, read: bool = True, write: Optional[int] = None
) -> Optional[int]:
"""Wrapper around `mallctl` for reading and writing integers to
jemalloc.
Args:
name: The name of the option to read from/write to.
read: Whether to try and read the value.
write: The value to write, if given.
Returns:
The value read if `read` is True, otherwise None.
Raises:
An exception if `mallctl` returns a non-zero error code.
"""
input_var = None
input_var_ref = None
input_len_ref = None
if read:
input_var = ctypes.c_size_t(0)
input_len = ctypes.c_size_t(ctypes.sizeof(input_var))
input_var_ref = ctypes.byref(input_var)
input_len_ref = ctypes.byref(input_len)
write_var_ref = None
write_len = ctypes.c_size_t(0)
if write is not None:
write_var = ctypes.c_size_t(write)
write_len = ctypes.c_size_t(ctypes.sizeof(write_var))
write_var_ref = ctypes.byref(write_var)
# The interface is:
#
# int mallctl(
# const char *name,
# void *oldp,
# size_t *oldlenp,
# void *newp,
# size_t newlen
# )
#
# Where oldp/oldlenp is a buffer where the old value will be written to
# (if not null), and newp/newlen is the buffer with the new value to set
# (if not null). Note that they're all references *except* newlen.
result = jemalloc.mallctl(
name.encode("ascii"),
input_var_ref,
input_len_ref,
write_var_ref,
write_len,
)
if result != 0:
raise Exception("Failed to call mallctl")
if input_var is None:
return None
return input_var.value
def _jemalloc_refresh_stats() -> None:
"""Request that jemalloc updates its internal statistics. This needs to
be called before querying for stats, otherwise it will return stale
values.
"""
try:
_mallctl("epoch", read=False, write=1)
except Exception:
pass
class JemallocCollector:
"""Metrics for internal jemalloc stats."""
def collect(self):
_jemalloc_refresh_stats()
g = GaugeMetricFamily(
"jemalloc_stats_app_memory",
"The stats reported by jemalloc",
labels=["type"],
)
# Read the relevant global stats from jemalloc. Note that these may
# not be accurate if python is configured to use its internal small
# object allocator (which is on by default, disable by setting the
# env `PYTHONMALLOC=malloc`).
#
# See the jemalloc manpage for details about what each value means,
# roughly:
# - allocated ─ Total number of bytes allocated by the app
# - active ─ Total number of bytes in active pages allocated by
# the application, this is bigger than `allocated`.
# - resident ─ Maximum number of bytes in physically resident data
# pages mapped by the allocator, comprising all pages dedicated
# to allocator metadata, pages backing active allocations, and
# unused dirty pages. This is bigger than `active`.
# - mapped ─ Total number of bytes in active extents mapped by the
# allocator.
# - metadata ─ Total number of bytes dedicated to jemalloc
# metadata.
for t in (
"allocated",
"active",
"resident",
"mapped",
"metadata",
):
try:
value = _mallctl(f"stats.{t}")
except Exception:
# There was an error fetching the value, skip.
continue
g.add_metric([t], value=value)
yield g
REGISTRY.register(JemallocCollector())
|
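The first step above locates the loaded jemalloc by scanning the process's own memory maps rather than trusting the library search path. A standalone, Linux-only sketch of that discovery step; it looks for libc instead of libjemalloc so it finds a match on most systems:

```python
import os
import re

regex = re.compile(r"/\S+/libc[.-]\S+$")   # e.g. .../libc.so.6 or .../libc-2.31.so
found = None
with open(f"/proc/{os.getpid()}/maps") as f:
    for line in f:
        match = regex.search(line.strip())
        if match:
            found = match.group()
print(found)   # e.g. /usr/lib/x86_64-linux-gnu/libc.so.6
```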
31,774 |
def add_host_command(client: Client, args: Dict[str, Any]) -> CommandResults:
entity_id = args.get('entity-id')
entity_name = args.get('entity-name')
name = args.get('name')
short_desc = args.get('short-description')
long_desc = args.get('long-description')
risk_level = args.get('risk-level')
threat_level = args.get('threat-level')
threat_level_comments = args.get('threat-level-comments')
status = args.get('host-status')
host_zone = args.get('host-zone')
use_eventlog_credentials = argToBoolean(args.get('use-eventlog-credentials'))
os_type = args.get('os-type')
os = args.get('os')
response = client.add_host_request(entity_id, entity_name, name, short_desc, long_desc, risk_level, threat_level,
threat_level_comments, status, host_zone, use_eventlog_credentials,
os, os_type)
hr = tableToMarkdown('Host added successfully', response, headerTransform=pascalToSpace)
command_results = CommandResults(
readable_output=hr,
outputs_prefix='LogRhythm.Host',
outputs_key_field='id',
outputs=response,
raw_response=response
)
return command_results
|
def add_host_command(client: Client, args: Dict[str, Any]) -> CommandResults:
entity_id = args.get('entity-id')
entity_name = args.get('entity-name')
name = args.get('name')
short_desc = args.get('short-description')
long_desc = args.get('long-description')
risk_level = args.get('risk-level')
threat_level = args.get('threat-level')
threat_level_comments = args.get('threat-level-comments')
status = args.get('host-status')
host_zone = args.get('host-zone')
use_eventlog_credentials = argToBoolean(args.get('use-eventlog-credentials'))
os_type = args.get('os-type')
os = args.get('os')
response = client.add_host_request(entity_id, entity_name, name, short_desc, long_desc, risk_level, threat_level,
threat_level_comments, status, host_zone, use_eventlog_credentials,
os, os_type)
hr = tableToMarkdown('Host added successfully', response, headerTransform=pascalToSpace)
command_results = CommandResults(
readable_output=hr,
outputs_prefix='LogRhythm.Host',
outputs_key_field='id',
outputs=response,
raw_response=response,
)
return command_results
|
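The command above simply collects optional, hyphenated demisto arguments and forwards them to a client call. A hypothetical, framework-free sketch of that collection pattern; add_host_request here is a made-up stand-in, not the LogRhythm client method:

```python
def add_host_request(**fields):
    # Keep only the arguments the caller actually supplied.
    return {k: v for k, v in fields.items() if v is not None}

args = {"entity-id": "7", "name": "web-01", "host-zone": "Internal"}
payload = add_host_request(
    entity_id=args.get("entity-id"),
    name=args.get("name"),
    risk_level=args.get("risk-level"),   # absent -> None -> dropped
    host_zone=args.get("host-zone"),
)
print(payload)   # {'entity_id': '7', 'name': 'web-01', 'host_zone': 'Internal'}
```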
43,292 |
def _chebyshev(one_hot_encoded_row, laplacian, coeffs, deg, max_eig):
"""
This function calculates one column of the Chebyshev approximation of exp(-scale * laplacian) for
all scales.
Args:
one_hot_encoded_row (SparseTensor): a sparse tensor indicating which column (node) to calculate.
laplacian (SparseTensor): the unormalized graph laplacian
coeffs: the Chebyshev coefficients for exp(-scale * x) for each scale in the shape (num_scales, deg)
deg: the degree of the Chebyshev polynomial
Returns:
(num_scales, num_nodes) tensor of the wavelets for each scale for the specified node.
"""
a = max_eig / 2
T_0 = tf.reshape(
tf.sparse.to_dense(one_hot_encoded_row), shape=(laplacian.shape[0], 1)
)
T_1 = (K.dot(laplacian, T_0) - a * T_0) / a
cheby_polys = [T_0, T_1]
for i in range(deg - 1):
cheby_poly = (2 / a) * (
K.dot(laplacian, cheby_polys[-1]) - a * cheby_polys[-1]
) - cheby_polys[-2]
cheby_polys.append(cheby_poly)
cheby_polys = K.squeeze(tf.stack(cheby_polys, axis=0), axis=-1)
return tf.matmul(coeffs, cheby_polys)
|
def _chebyshev(one_hot_encoded_row, laplacian, coeffs, max_eig):
"""
This function calculates one column of the Chebyshev approximation of exp(-scale * laplacian) for
all scales.
Args:
one_hot_encoded_row (SparseTensor): a sparse tensor indicating which column (node) to calculate.
laplacian (SparseTensor): the unormalized graph laplacian
coeffs: the Chebyshev coefficients for exp(-scale * x) for each scale in the shape (num_scales, deg)
deg: the degree of the Chebyshev polynomial
Returns:
(num_scales, num_nodes) tensor of the wavelets for each scale for the specified node.
"""
a = max_eig / 2
T_0 = tf.reshape(
tf.sparse.to_dense(one_hot_encoded_row), shape=(laplacian.shape[0], 1)
)
T_1 = (K.dot(laplacian, T_0) - a * T_0) / a
cheby_polys = [T_0, T_1]
# The polynomial degree is inferred from coeffs: deg + 1 Chebyshev
# coefficients per scale correspond to a polynomial of degree deg.
deg = coeffs.shape[1] - 1
for i in range(deg - 1):
cheby_poly = (2 / a) * (
K.dot(laplacian, cheby_polys[-1]) - a * cheby_polys[-1]
) - cheby_polys[-2]
cheby_polys.append(cheby_poly)
cheby_polys = K.squeeze(tf.stack(cheby_polys, axis=0), axis=-1)
return tf.matmul(coeffs, cheby_polys)
|
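The loop above is the standard Chebyshev three-term recurrence applied to the Laplacian rescaled by a = max_eig / 2. A NumPy sketch of the same polynomial construction on a toy 3-node path graph (values are illustrative):

```python
import numpy as np

laplacian = np.array([[ 1., -1.,  0.],
                      [-1.,  2., -1.],
                      [ 0., -1.,  1.]])
max_eig = np.linalg.eigvalsh(laplacian).max()
a = max_eig / 2
e0 = np.array([[1.], [0.], [0.]])           # one-hot column selector, like T_0

T = [e0, (laplacian @ e0 - a * e0) / a]     # T_0 and T_1 of the rescaled operator
deg = 4
for _ in range(deg - 1):
    # T_{k+1} = 2 * ((L - a*I)/a) @ T_k - T_{k-1}
    T.append((2 / a) * (laplacian @ T[-1] - a * T[-1]) - T[-2])
print(np.hstack(T).shape)                   # (3, 5): deg + 1 polynomial columns
```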
47,995 |
def add_model_pages(output_root, parent_element, group, group_title):
group_element = add_page(output_root, parent_element, title=group_title,
id=f'omz_models_group_{group}', path=f'models/{group}/index.md')
task_type_elements = {}
device_support_path = OMZ_ROOT / 'models' / group / 'device_support.md'
with (device_support_path).open('r', encoding="utf-8") as device_support_file:
raw_device_support = device_support_file.read()
device_support_lines = re.findall(r'^\|\s\S+\s\|', raw_device_support, re.MULTILINE)
device_support_lines = [device_support_line.strip(' |')
for device_support_line in device_support_lines]
for md_path in sorted(OMZ_ROOT.glob(f'models/{group}/*/**/*.md')):
md_path_rel = md_path.relative_to(OMZ_ROOT)
model_name = md_path_rel.parts[2]
device_support_path_rel = device_support_path.relative_to(OMZ_ROOT)
if model_name not in device_support_lines:
if not (md_path.parent / 'composite-model.yml').exists():
raise RuntimeError(f'{device_support_path_rel}: "{model_name}" '
'model reference is missing.')
model_subdirs = (subdir.name for subdir in md_path.parent.glob('*/**'))
for model_subdir in model_subdirs:
if not (md_path.parent / model_subdir / 'model.yml').exists():
continue # non-model folder
if model_subdir not in device_support_lines:
raise RuntimeError(f'{device_support_path_rel}: '
f'"{model_subdir}" part reference of '
f'"{model_name}" composite model is missing.')
expected_md_path = Path('models', group, model_name, 'README.md')
if md_path_rel != expected_md_path:
raise RuntimeError(f'{md_path_rel}: unexpected documentation file,'
f' should be {expected_md_path}')
# FIXME: use the info dumper to query model information instead of
# parsing the configs. We're not doing that now, because the info
# dumper doesn't support composite models yet.
model_yml_path = OMZ_ROOT / 'models' / group / model_name / 'model.yml'
composite_model_yml_path = model_yml_path.with_name('composite-model.yml')
if model_yml_path.exists():
expected_title = model_name
with open(model_yml_path, 'rb') as f:
config = yaml.safe_load(f)
task_type = config['task_type']
elif composite_model_yml_path.exists():
expected_title = f'{model_name} (composite)'
with open(composite_model_yml_path, 'rb') as f:
config = yaml.safe_load(f)
task_type = config['task_type']
else:
logging.warning(
'{}: no corresponding model.yml or composite-model.yml found; skipping'
.format(md_path_rel))
continue
if task_type not in task_type_elements:
human_readable_task_type = HUMAN_READABLE_TASK_TYPES.get(task_type,
task_type.replace('_', ' ').title())
task_type_elements[task_type] = add_page(output_root, group_element,
title=f'{human_readable_task_type} Models')
# All model names are unique, so we don't need to include the group
# in the page ID. However, we do prefix "model_", so that model pages
# don't conflict with any other pages in the omz_models namespace that
# might be added later.
page_id = 'omz_models_model_' + re.sub(r'[^a-zA-Z0-9]', '_', model_name)
model_element = add_page(output_root, task_type_elements[task_type],
id=page_id, path=md_path_rel)
if model_element.attrib['title'] != expected_title:
raise RuntimeError(f'{md_path_rel}: should have title "{expected_title}"')
sort_titles(group_element)
|
def add_model_pages(output_root, parent_element, group, group_title):
group_element = add_page(output_root, parent_element, title=group_title,
id=f'omz_models_group_{group}', path=f'models/{group}/index.md')
task_type_elements = {}
device_support_path = OMZ_ROOT / 'models' / group / 'device_support.md'
with device_support_path.open('r', encoding="utf-8") as device_support_file:
raw_device_support = device_support_file.read()
device_support_lines = re.findall(r'^\|\s\S+\s\|', raw_device_support, re.MULTILINE)
device_support_lines = [device_support_line.strip(' |')
for device_support_line in device_support_lines]
for md_path in sorted(OMZ_ROOT.glob(f'models/{group}/*/**/*.md')):
md_path_rel = md_path.relative_to(OMZ_ROOT)
model_name = md_path_rel.parts[2]
device_support_path_rel = device_support_path.relative_to(OMZ_ROOT)
if model_name not in device_support_lines:
if not (md_path.parent / 'composite-model.yml').exists():
raise RuntimeError(f'{device_support_path_rel}: "{model_name}" '
'model reference is missing.')
model_subdirs = (subdir.name for subdir in md_path.parent.glob('*/**'))
for model_subdir in model_subdirs:
if not (md_path.parent / model_subdir / 'model.yml').exists():
continue # non-model folder
if model_subdir not in device_support_lines:
raise RuntimeError(f'{device_support_path_rel}: '
f'"{model_subdir}" part reference of '
f'"{model_name}" composite model is missing.')
expected_md_path = Path('models', group, model_name, 'README.md')
if md_path_rel != expected_md_path:
raise RuntimeError(f'{md_path_rel}: unexpected documentation file,'
f' should be {expected_md_path}')
# FIXME: use the info dumper to query model information instead of
# parsing the configs. We're not doing that now, because the info
# dumper doesn't support composite models yet.
model_yml_path = OMZ_ROOT / 'models' / group / model_name / 'model.yml'
composite_model_yml_path = model_yml_path.with_name('composite-model.yml')
if model_yml_path.exists():
expected_title = model_name
with open(model_yml_path, 'rb') as f:
config = yaml.safe_load(f)
task_type = config['task_type']
elif composite_model_yml_path.exists():
expected_title = f'{model_name} (composite)'
with open(composite_model_yml_path, 'rb') as f:
config = yaml.safe_load(f)
task_type = config['task_type']
else:
logging.warning(
'{}: no corresponding model.yml or composite-model.yml found; skipping'
.format(md_path_rel))
continue
if task_type not in task_type_elements:
human_readable_task_type = HUMAN_READABLE_TASK_TYPES.get(task_type,
task_type.replace('_', ' ').title())
task_type_elements[task_type] = add_page(output_root, group_element,
title=f'{human_readable_task_type} Models')
# All model names are unique, so we don't need to include the group
# in the page ID. However, we do prefix "model_", so that model pages
# don't conflict with any other pages in the omz_models namespace that
# might be added later.
page_id = 'omz_models_model_' + re.sub(r'[^a-zA-Z0-9]', '_', model_name)
model_element = add_page(output_root, task_type_elements[task_type],
id=page_id, path=md_path_rel)
if model_element.attrib['title'] != expected_title:
raise RuntimeError(f'{md_path_rel}: should have title "{expected_title}"')
sort_titles(group_element)
|
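The device_support.md parsing above pulls the first cell of every markdown table row with a regex and strips the surrounding pipes. A standalone sketch with a made-up table showing what that extraction yields:

```python
import re

raw_device_support = """\
| Model Name | CPU | GPU |
|------------|-----|-----|
| alexnet | YES | YES |
| colorization-v2 | YES | |
"""
cells = re.findall(r'^\|\s\S+\s\|', raw_device_support, re.MULTILINE)
names = [cell.strip(' |') for cell in cells]
print(names)   # ['alexnet', 'colorization-v2'] -- header/divider rows don't match
```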
30,732 |
def panorama_get_predefined_threats_list_command():
target = str(demisto.args()['target']) if 'target' in demisto.args() else None
result = panorama_get_predefined_threats_list(target)
demisto.results(fileResult('predefined-threats.json', json.dumps(result['response']['result']).encode('utf-8')))
|
def panorama_get_predefined_threats_list_command():
target = str(demisto.args().get('target', ''))
result = panorama_get_predefined_threats_list(target)
demisto.results(fileResult('predefined-threats.json', json.dumps(result['response']['result']).encode('utf-8')))
|
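The rewrite above swaps a membership check for dict.get(). A tiny illustration of the one behavioral difference worth noting: when the key is absent, the first form yields None while the second yields an empty string.

```python
args = {}

target_original = str(args['target']) if 'target' in args else None
target_modified = str(args.get('target', ''))

print(repr(target_original))   # None
print(repr(target_modified))   # ''
```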
24,880 |
def _loop_exits_early(loop):
"""
Returns true if a loop mays end up in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end up in a break statement, False otherwise.
"""
loop_nodes = (nodes.For, nodes.While)
definition_nodes = (nodes.FunctionDef, nodes.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(nodes.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
|
def _loop_exits_early(loop):
"""
Returns true if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (nodes.For, nodes.While)
definition_nodes = (nodes.FunctionDef, nodes.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(nodes.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
|
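The check above walks the loop with astroid and counts only break statements that belong to the loop itself, not to nested loops or nested function/class definitions. An analogous sketch using the standard-library ast module instead of astroid (the helper below is illustrative, not pylint's implementation):

```python
import ast

def loop_exits_early(loop: ast.AST) -> bool:
    def breaks(node, binds_to_outer):
        for child in ast.iter_child_nodes(node):
            if isinstance(child, ast.Break) and binds_to_outer:
                yield child
            elif isinstance(child, (ast.For, ast.While)):
                yield from breaks(child, False)   # a break here binds to the inner loop
            elif not isinstance(child, (ast.FunctionDef, ast.ClassDef)):
                yield from breaks(child, binds_to_outer)
    return any(breaks(loop, True))

outer = ast.parse("for x in xs:\n    for y in ys:\n        break").body[0]
print(loop_exits_early(outer))   # False: the break exits only the inner loop
```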
5,641 |
def variation(a, axis=0):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
For more details about `variation`, see `stats.variation`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats.mstats import variation
>>> a = np.array([2,8,4])
>>> variation(a)
0.5345224838248487
>>> b = np.array([2,8,3,4])
>>> c = np.ma.masked_array(b, mask=[0,0,1,0])
>>> variation(c)
0.5345224838248487
In the example above, it can be seen that this works as same as
`stats.variation` except that 'stats.mstats.variation' ignores
invalid array elements.
"""
a, axis = _chk_asarray(a, axis)
return a.std(axis)/a.mean(axis)
|
def variation(a, axis=0):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
For more details about `variation`, see `stats.variation`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats.mstats import variation
>>> a = np.array([2,8,4])
>>> variation(a)
0.5345224838248487
>>> b = np.array([2,8,3,4])
>>> c = np.ma.masked_array(b, mask=[0,0,1,0])
>>> variation(c)
0.5345224838248487
In the example above, it can be seen that this works the same as
`stats.variation` except that 'stats.mstats.variation' ignores
invalid array elements.
"""
a, axis = _chk_asarray(a, axis)
return a.std(axis)/a.mean(axis)
|
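A quick worked check of the docstring example above: the coefficient of variation is the biased (ddof=0) standard deviation divided by the mean, and masked entries are simply excluded.

```python
import numpy as np

a = np.array([2, 8, 4])
print(a.std() / a.mean())   # 0.5345... (np.std defaults to ddof=0, i.e. biased)

b = np.ma.masked_array([2, 8, 3, 4], mask=[0, 0, 1, 0])
print(b.std() / b.mean())   # same value: the masked 3 is ignored
```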
25,912 |
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all different image sources into 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
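# Illustrative usage of the per-disk syntax above (resource/VM names are placeholders; option names are
# assumed to be the defaults derived from the parameter names):
#   az vm update -g MyResourceGroup -n MyVm --write-accelerator os=true 1=true --disk-caching os=ReadWrite 1=ReadOnly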
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name of the dedicated host group containing the dedicated host this VM will reside in.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Name or ID of the dedicated host this VM will reside in. If a name is specified, a host group must be specified via `--host-group`.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
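# Illustrative usage of the arguments above (resource group and VM names are placeholders):
#   az vm open-port -g MyResourceGroup -n MyVm --port 80-100 --priority 900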
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run slowly.')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypt-format data disks instead of encrypting them in place. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', arg_type=get_enum_type(['DSv3-Type1', 'ESv3-Type1', 'FSv2-Type2']),
help="Sku of the dedicated host.")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span. Allowed values: 1, 2, 3")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
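# NOTE: CLIArgumentType(overrides=...) starts from the settings of the stock location argument and only
# replaces the help text, so the rest of its behavior (option names, completer) is preserved while
# documenting the fall-back to the resource group's location.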
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details. Default to false.", default=False)
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set", is_preview=True)
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In VM mode, you manually create and add a virtual machine of any configuration to the scale set. In ScaleSetVM mode, you define a virtual machine model and Azure will generate identical instances based on that model.',
arg_type=get_enum_type(['VM', 'ScaleSetVM']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) during which a notification is sent to the VM on the instance metadata server before the VM gets deleted')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
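# With the prefixes above, the generated help reads, e.g., "Update the VM instance with this ID.
# If missing, Update the VMSS." for 'vmss update', and the analogous "Wait on the ..." text for 'vmss wait'.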
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
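# The options_list built above exposes --vmss-name and keeps the old --name/-n spellings as hidden,
# deprecated aliases that redirect to --vmss-name.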
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade', arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc.)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="Accept system or user assigned identities separated by spaces. Use '[system]' to refer to the system assigned identity, or a resource id to refer to a user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM.', default=get_default_admin_username())
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying a URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
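# Illustrative per-disk SKU usage following the format above (SKU names are examples and must be valid
# for the target cloud profile):
#   az vm create ... --storage-sku os=Premium_LRS 0=Standard_LRS 1=Standard_LRS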
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="Storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply to all disks, or use `<lun>=<value1> <lun>=<value2>` to configure individual disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default.', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='Version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a low priority VM/VMSS. -1 indicates that the low priority VM/VMSS should not be evicted for price reasons')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], )
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed with the image version omitted will not use this image version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
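# Illustrative --target-region-encryption value for one region, following the format above
# (disk encryption set names are placeholders): MyOsDes,0,MyLun0Des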
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
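# Illustrative usage following the format above (region names are examples):
#   --target-regions westus=2=Standard_LRS eastus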
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
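# The generator above filters out the 'none' member of AggregationType so that only meaningful
# aggregations (e.g. average, count, maximum) are offered as --aggregation choices.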
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
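# Illustrative: --scale-in-policy OldestVM. Allowed values come from the service's
# VirtualMachineScaleSetScaleInRules enum (typically Default, OldestVM and NewestVM).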
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
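# Illustrative values for the --source argument registered above (hypothetical names):
#   az disk create -g MyRG -n MyDisk --source MySnapshot
#   az snapshot create -g MyRG -n MySnapshot --source MyDisk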
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared gallery image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
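# A sketch combining the shared-disk and performance arguments above (hypothetical names;
# Ultra SSD limits vary by region and disk size):
#   az disk create -g MyRG -n MySharedDisk --size-gb 1024 --sku UltraSSD_LRS \
#       --max-shares 2 --disk-iops-read-write 2000 --disk-mbps-read-write 200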
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
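# Illustrative incremental snapshot creation using the flag above (hypothetical names):
#   az snapshot create -g MyRG -n MySnapshot --source MyDisk --incremental true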
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all different image sources under 2 common arguments: --os-disk-source and --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
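# Illustrative use of the collapsed source arguments above (hypothetical names; --os-type may
# be required when the source is not a VM):
#   az image create -g MyRG -n MyImage --source MyVM
#   az image create -g MyRG -n MyImage --source MyOsSnapshot --data-disk-sources MyDataSnapshot --os-type Linux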
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
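# Illustrative distributor values matching the help text above (hypothetical names):
#   --managed-image-destinations image_1=westus2 image_2=westus
#   --shared-image-destinations my_gallery_1/image_def_1=eastus,westus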
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_output_type)
c.argument('shared_image_destinations', arg_type=ib_output_type)
c.argument('output_name', arg_type=ib_output_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
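# A sketch of adding a shell customizer with the arguments above (hypothetical names and URL;
# valid --type values come from the ScriptType enum):
#   az image builder customizer add -g MyRG -n MyTemplate --customizer-name MySetup \
#       --type shell --script-url https://example.com/setup.sh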
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
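# Illustrative per-disk settings for `az vm update` based on the help text above (hypothetical names):
#   az vm update -g MyRG -n MyVM --write-accelerator os=true 1=true
#   az vm update -g MyRG -n MyVM --disk-caching os=ReadWrite 0=None 1=ReadOnly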
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='Pre-existing storage account name or its blob URI to capture boot diagnostics. Its SKU should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name of the dedicated host group containing the dedicated host this VM will reside in.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Name or ID of the dedicated host this VM will reside in. If a name is specified, a host group must be specified via `--host-group`.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
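# Illustrative use of `az vm open-port` with the arguments above (hypothetical names):
#   az vm open-port -g MyRG -n MyVM --port 80-100 --priority 900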
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. Note that the command will run slowly.')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. For example, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', arg_type=get_enum_type(['DSv3-Type1', 'ESv3-Type1', 'FSv2-Type2']),
help="Sku of the dedicated host.")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span. Allowed values: 1, 2, 3")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details. Default to false", default=False)
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Defaults to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set", is_preview=True)
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In VM mode, you manually create and add a virtual machine of any configuration to the scale set. In ScaleSetVM mode, you define a virtual machine model and Azure will generate identical instances based on that model.',
arg_type=get_enum_type(['VM', 'ScaleSetVM']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) for which a notification is sent to the VM on the instance metadata server before the VM gets deleted')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade', arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="Accept system or user assigned identities separated by spaces. Use '[system]' to refer to the system assigned identity, or a resource id to refer to a user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
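# A minimal sketch of the --secrets payload described above (all values are placeholders):
#   [{"sourceVault": {"id": "<key-vault-resource-id>"},
#     "vaultCertificates": [{"certificateUrl": "<secret-url>", "certificateStore": "My"}]}]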
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM.', default=get_default_admin_username())
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying a URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
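# Illustrative --storage-sku forms implied by the usage string above (hypothetical names):
#   az vm create -g MyRG -n MyVM --image UbuntuLTS --storage-sku Premium_LRS
#   az vm create -g MyRG -n MyVM --image UbuntuLTS --storage-sku os=Premium_LRS 0=StandardSSD_LRS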
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default.', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a low priority VM/VMSS. -1 indicates that the low priority VM/VMSS should not be evicted for price reasons')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], )
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed without specifying an image version (i.e. using latest) will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
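# --- Illustrative sketch (not part of the snippet above) ---
# A minimal example of the registration pattern used throughout this file:
# an AzCommandsLoader subclass opens an argument_context for a command scope
# and attaches metadata to individual arguments. The loader class, the
# 'demo vm-ext' scope and the '--flavor' option are hypothetical names used
# only for illustration.
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands.parameters import get_enum_type


class DemoCommandsLoader(AzCommandsLoader):
    def load_arguments(self, command):
        with self.argument_context('demo vm-ext') as c:
            c.argument('flavor', options_list=['--flavor', '-f'],
                       arg_type=get_enum_type(['small', 'large']),
                       help='Hypothetical size flavor, shown to demonstrate help/enum wiring.')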
|
23,651 |
def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal. [degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated using the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
|
def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal. [degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
``airmass`` is not provided, it is calculated using the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
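# --- Usage sketch (assumes pvlib is installed; the inputs are illustrative
# numbers, not measured data) ---
import pvlib

poa = pvlib.irradiance.get_total_irradiance(
    surface_tilt=30, surface_azimuth=180,
    solar_zenith=45, solar_azimuth=170,
    dni=800, ghi=600, dhi=100,
    model='isotropic')               # isotropic model needs no dni_extra/airmass
print(poa['poa_global'])             # total plane-of-array irradiance [W/m2]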
|
3,270 |
def validate_argument_count(field, function, arguments):
"""
Validate the number of required arguments the function defines against
provided arguments. Raise an exception if there is a mismatch in the
number of arguments. Do not return any values.
There are 4 cases:
1. provided # of arguments != required # of arguments AND provided # of arguments != total # of arguments (bad, raise an error)
2. provided # of arguments < required # of arguments (bad, raise an error)
3. provided # of arguments > total # of arguments (bad, raise an error)
4. required # of arguments <= provided # of arguments <= total # of arguments (good, pass the validation)
"""
args_count = len(arguments)
total_args_count = count_total_arguments(function)
if args_count != total_args_count:
required_args_count = count_required_arguments(function)
if required_args_count == total_args_count:
raise InvalidSearchQuery(
u"{}: expected {:g} arguments".format(field, len(function["args"]))
)
elif args_count < required_args_count:
raise InvalidSearchQuery(
u"{}: expected at least {:g} arguments".format(field, required_args_count)
)
elif args_count > total_args_count:
raise InvalidSearchQuery(
u"{}: expected at most {:g} arguments".format(field, total_args_count)
)
|
def validate_argument_count(field, function, arguments):
"""
Validate the number of required arguments the function defines against
provided arguments. Raise an exception if there is a mismatch in the
number of arguments. Do not return any values.
There are 4 cases:
1. provided # of arguments != required # of arguments AND provided # of arguments != total # of arguments (bad, raise an error)
2. provided # of arguments < required # of arguments (bad, raise an error)
3. provided # of arguments > total # of arguments (bad, raise an error)
4. required # of arguments <= provided # of arguments <= total # of arguments (good, pass the validation)
"""
args_count = len(arguments)
total_args_count = count_total_arguments(function)
if args_count != total_args_count:
required_args_count = len(filter(lambda arg: arg.has_default, function["args"]))
if required_args_count == total_args_count:
raise InvalidSearchQuery(
u"{}: expected {:g} arguments".format(field, len(function["args"]))
)
elif args_count < required_args_count:
raise InvalidSearchQuery(
u"{}: expected at least {:g} arguments".format(field, required_args_count)
)
elif args_count > total_args_count:
raise InvalidSearchQuery(
u"{}: expected at most {:g} arguments".format(field, total_args_count)
)
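# --- Illustration of the four argument-count cases in the docstring above,
# using plain integers (this standalone sketch does not call the module's
# count_required_arguments/count_total_arguments helpers) ---
required_args_count, total_args_count = 2, 3   # e.g. f(a, b, c=None)
for provided in (1, 2, 3, 4):
    valid = required_args_count <= provided <= total_args_count
    print(provided, 'provided ->', 'ok' if valid else 'raise InvalidSearchQuery')
# 1 -> raise, 2 -> ok, 3 -> ok, 4 -> raise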
|
32,265 |
def get_remote_data_command(client: Client, args: Dict[str, Any], params: Dict) -> Union[List[Dict[str, Any]], str]:
"""
get-remote-data command: Returns an updated incident and entries
Args:
client: XSOAR client to use
args:
id: incident id to retrieve
lastUpdate: when was the last time we retrieved data
Returns:
List[Dict[str, Any]]: first entry is the incident (which can be completely empty) and the new entries.
"""
ticket_id = args.get('id', '')
demisto.debug(f'Getting update for remote {ticket_id}')
last_update = dateparser.parse(str(args.get('lastUpdate')), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert last_update is not None
demisto.debug(f'last_update is {last_update}')
_args = {}
_args['incident_id'] = ticket_id
result = get_incidents_list(client=client, args=_args)
if not result:
demisto.debug('Ticket was not found!')
return 'Ticket was not found'
else:
demisto.debug('Ticket was found!')
ticket = result[0]
ticket_last_update = dateparser.parse(str(ticket["modificationDate"]), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert ticket_last_update is not None
if last_update > ticket_last_update:
demisto.debug('Nothing new in the ticket')
ticket = {}
else:
demisto.debug('ticket is updated')
entries = []
# Get actions
# - could be optimized if list_actions would apply filter with last_update timestamp
actions = client.list_actions(incident_id=ticket_id, incident_number=None)
# Filter actions
for action in actions:
if 'Mirrored from Cortex XSOAR' not in action['memoText']:
entry_date = dateparser.parse(action["entryDate"], settings={'TIMEZONE': 'UTC'}) # type: ignore
assert entry_date is not None
if last_update > entry_date:
demisto.debug('skip entry')
else:
demisto.debug('mirror entry to xsoar')
if action["operator"]:
name = action["operator"]["name"]
elif action["person"]:
name = action["person"]["name"]
else:
name = "Unknown"
date_time = entry_date.strftime("%d-%m-%Y %H:%M:%S")
entries.append({
'Type': EntryType.NOTE,
'Contents': f'[{date_time}] {name}:\n\n{action["memoText"]}',
'ContentsFormat': EntryFormat.TEXT,
'Tags': ['mirrored'], # the list of tags to add to the entry
'Note': True # boolean, True for Note, False otherwise
})
if ticket.get('closed'):
if params.get('close_incident'):
demisto.debug(f'ticket is closed: {ticket}')
entries.append({
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': 'Closed by TOPdesk'
},
'ContentsFormat': EntryFormat.JSON
})
demisto.debug(f'Pull result is {ticket}')
return [ticket] + entries
|
def get_remote_data_command(client: Client, args: Dict[str, Any], params: Dict) -> Union[List[Dict[str, Any]], str]:
"""
get-remote-data command: Returns an updated incident and entries
Args:
client: XSOAR client to use
args:
id: incident id to retrieve
lastUpdate: when was the last time we retrieved data
Returns:
List[Dict[str, Any]]: first entry is the incident (which can be completely empty) and the new entries.
"""
ticket_id = args.get('id', '')
demisto.debug(f'Getting update for remote {ticket_id}')
last_update = dateparser.parse(str(args.get('lastUpdate')), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert last_update is not None
demisto.debug(f'last_update is {last_update}')
_args = {}
_args['incident_id'] = ticket_id
result = get_incidents_list(client=client, args=_args)
if not result:
demisto.debug('Ticket was not found!')
return 'Ticket was not found'
else:
demisto.debug('Ticket was found!')
ticket = result[0]
ticket_last_update = dateparser.parse(str(ticket["modificationDate"]), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert ticket_last_update is not None
if last_update > ticket_last_update:
demisto.debug('Nothing new in the ticket')
ticket = {}
else:
demisto.debug('ticket is updated')
entries = []
# Get actions
# - could be optimized if list_actions would apply filter with last_update timestamp
actions = client.list_actions(incident_id=ticket_id, incident_number=None)
# Filter actions
for action in actions:
if 'Mirrored from Cortex XSOAR' not in action['memoText']:
entry_date = dateparser.parse(action["entryDate"], settings={'TIMEZONE': 'UTC'}) # type: ignore
assert entry_date is not None
if last_update > entry_date:
demisto.debug('skip entry')
else:
demisto.debug('mirror entry to xsoar')
if action["operator"]:
name = action["operator"]["name"]
elif action["person"]:
name = action["person"]["name"]
else:
name = "Unknown"
date_time = entry_date.strftime(DATE_FORMAT)
entries.append({
'Type': EntryType.NOTE,
'Contents': f'[{date_time}] {name}:\n\n{action["memoText"]}',
'ContentsFormat': EntryFormat.TEXT,
'Tags': ['mirrored'], # the list of tags to add to the entry
'Note': True # boolean, True for Note, False otherwise
})
if ticket.get('closed'):
if params.get('close_incident'):
demisto.debug(f'ticket is closed: {ticket}')
entries.append({
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': 'Closed by TOPdesk'
},
'ContentsFormat': EntryFormat.JSON
})
demisto.debug(f'Pull result is {ticket}')
return [ticket] + entries
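# --- Minimal sketch of the timestamp filtering used above (requires the
# 'dateparser' package; the timestamps are made up) ---
import dateparser

last_update = dateparser.parse('2021-01-01 10:00:00', settings={'TIMEZONE': 'UTC'})
entry_date = dateparser.parse('2021-01-01 12:00:00', settings={'TIMEZONE': 'UTC'})
# Entries created before the last sync are skipped; newer ones are mirrored.
print('mirror entry to xsoar' if entry_date > last_update else 'skip entry')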
|
39,738 |
def unit_variance_mlpg_matrix(windows, T):
"""Compute MLPG matrix assuming input is normalized to have unit-variances.
Let :math:`\mu` be the input mean sequence (``num_windows*T x static_dim``),
:math:`W` is a window matrix ``(T x num_windows*T)``, assuming input is
normalized to have unit-variances, MLPG can be written as follows:
.. math::
y = R \mu
where
.. math::
R = (W^{T} W)^{-1} W^{T}
Here we call :math:`R` as the MLPG matrix.
Args:
windows: (list): List of windows.
T (int): Number of frames.
Returns:
numpy.ndarray: MLPG matrix (``T x num_windows*T``).
See also:
:func:`nnmnkwii.autograd.UnitVarianceMLPG`,
:func:`nnmnkwii.paramgen.mlpg`.
Examples:
>>> from nnmnkwii import paramgen as G
>>> import numpy as np
>>> windows = [
... (0, 0, np.array([1.0])),
... (1, 1, np.array([-0.5, 0.0, 0.5])),
... (1, 1, np.array([1.0, -2.0, 1.0])),
... ]
>>> G.unit_variance_mlpg_matrix(windows, 3)
array([[ 2.73835927e-01, 1.95121944e-01, 9.20177400e-02,
9.75609720e-02, -9.09090936e-02, -9.75609720e-02,
-3.52549881e-01, -2.43902430e-02, 1.10864742e-02],
[ 1.95121944e-01, 3.41463417e-01, 1.95121944e-01,
1.70731708e-01, -5.55111512e-17, -1.70731708e-01,
-4.87804860e-02, -2.92682916e-01, -4.87804860e-02],
[ 9.20177400e-02, 1.95121944e-01, 2.73835927e-01,
9.75609720e-02, 9.09090936e-02, -9.75609720e-02,
1.10864742e-02, -2.43902430e-02, -3.52549881e-01]], dtype=float32)
"""
win_mats = build_win_mats(windows, T)
sdw = np.max([win_mat.l + win_mat.u for win_mat in win_mats])
max_win_width = np.max([max(win_mat.l, win_mat.u) for win_mat in win_mats])
P = bm.zeros(sdw, sdw, T)
# set edge precisions to zero
precisions = bm.zeros(0, 0, T)
precisions.data[:, max_win_width:-max_win_width] += 1.0
mod_win_mats = list()
for win_index, win_mat in enumerate(win_mats):
if win_index != 0:
# use zero precisions for dynamic features
mod_win_mat = bm.dot_mm(precisions, win_mat)
bm.dot_mm_plus_equals(mod_win_mat.T, win_mat, target_bm=P)
mod_win_mats.append(mod_win_mat)
else:
# static features
bm.dot_mm_plus_equals(win_mat.T, win_mat, target_bm=P)
mod_win_mats.append(win_mat)
chol_bm = bla.cholesky(P, lower=True)
Pinv = cholesky_inv_banded(chol_bm.full(), width=chol_bm.l + chol_bm.u + 1)
concatenated_window = full_window_mat(mod_win_mats, T)
return Pinv.dot(concatenated_window.T).astype(np.float32)
|
def unit_variance_mlpg_matrix(windows, T):
"""Compute MLPG matrix assuming input is normalized to have unit-variances.
Let :math:`\mu` be the input mean sequence (``num_windows*T x static_dim``),
:math:`W` is a window matrix ``(T x num_windows*T)``, assuming input is
normalized to have unit-variances, MLPG can be written as follows:
.. math::
y = R \mu
where
.. math::
R = (W^{T} W)^{-1} W^{T}
Here we call :math:`R` as the MLPG matrix.
Args:
windows: (list): List of windows.
T (int): Number of frames.
Returns:
numpy.ndarray: MLPG matrix (``T x num_windows*T``).
See also:
:func:`nnmnkwii.autograd.UnitVarianceMLPG`,
:func:`nnmnkwii.paramgen.mlpg`.
Examples:
>>> from nnmnkwii import paramgen as G
>>> import numpy as np
>>> windows = [
... (0, 0, np.array([1.0])),
... (1, 1, np.array([-0.5, 0.0, 0.5])),
... (1, 1, np.array([1.0, -2.0, 1.0])),
... ]
>>> G.unit_variance_mlpg_matrix(windows, 3)
array([[ 2.73835927e-01, 1.95121944e-01, 9.20177400e-02,
9.75609720e-02, -9.09090936e-02, -9.75609720e-02,
-3.52549881e-01, -2.43902430e-02, 1.10864742e-02],
[ 1.95121944e-01, 3.41463417e-01, 1.95121944e-01,
1.70731708e-01, -5.55111512e-17, -1.70731708e-01,
-4.87804860e-02, -2.92682916e-01, -4.87804860e-02],
[ 9.20177400e-02, 1.95121944e-01, 2.73835927e-01,
9.75609720e-02, 9.09090936e-02, -9.75609720e-02,
1.10864742e-02, -2.43902430e-02, -3.52549881e-01]], dtype=float32)
"""
win_mats = build_win_mats(windows, T)
sdw = np.max([win_mat.l + win_mat.u for win_mat in win_mats])
max_win_width = np.max([max(win_mat.l, win_mat.u) for win_mat in win_mats])
P = bm.zeros(sdw, sdw, T)
# set edge precisions to zero
precisions = bm.zeros(0, 0, T)
precisions.data[:, max_win_width:-max_win_width] += 1.0
mod_win_mats = []
for win_index, win_mat in enumerate(win_mats):
if win_index != 0:
# use zero precisions for dynamic features
mod_win_mat = bm.dot_mm(precisions, win_mat)
bm.dot_mm_plus_equals(mod_win_mat.T, win_mat, target_bm=P)
mod_win_mats.append(mod_win_mat)
else:
# static features
bm.dot_mm_plus_equals(win_mat.T, win_mat, target_bm=P)
mod_win_mats.append(win_mat)
chol_bm = bla.cholesky(P, lower=True)
Pinv = cholesky_inv_banded(chol_bm.full(), width=chol_bm.l + chol_bm.u + 1)
concatenated_window = full_window_mat(mod_win_mats, T)
return Pinv.dot(concatenated_window.T).astype(np.float32)
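# --- Dense NumPy cross-check of the docstring formula R = (W^T W)^{-1} W^T.
# This sketch uses a random full-rank window matrix of shape
# (num_windows*T, T) and ignores the banded edge handling done above. ---
import numpy as np

rng = np.random.default_rng(0)
T, num_windows = 3, 3
W = rng.normal(size=(num_windows * T, T))
R = np.linalg.inv(W.T @ W) @ W.T        # MLPG matrix, shape (T, num_windows*T)
mu = rng.normal(size=num_windows * T)
y = R @ mu                              # smoothed static trajectory of length T
print(R.shape, y.shape)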
|
32,129 |
def parse(report: dict, keys: list) -> Union[dict, None]:
outputs = {}
for key in keys:
if key[0] in report and report[key[0]]:
item_value = report[key[0]]
outputs[key[1]] = item_value
return outputs if outputs else None
|
def parse(report: dict, keys: List[tuple]) -> Union[dict, None]:
outputs = {}
for key in keys:
if key[0] in report and report[key[0]]:
item_value = report[key[0]]
outputs[key[1]] = item_value
return outputs if outputs else None
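# --- Usage sketch with a made-up report; keys are (source_field, output_field)
# tuples, which is the shape the annotation above assumes ---
report = {'md5': 'abc123', 'verdict': 'malicious', 'notes': ''}
keys = [('md5', 'MD5'), ('verdict', 'Verdict'), ('notes', 'Notes')]
print(parse(report, keys))   # {'MD5': 'abc123', 'Verdict': 'malicious'} - empty values are dropped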
|
23,338 |
def test_pylint_max_history_conf(
pylint_test_script, pylint_test_scripts, mocker, qtbot):
"""Regression test for checking max_entries configuration.
For further information see spyder-ide/spyder#12884
"""
# Create the pylint widget for code analysis
main_window = MainWindowMock()
main_window.projects.get_active_project_path = mocker.MagicMock(
return_value=None)
pylint_sw = Pylint(parent=main_window)
pylint_widget = PylintWidget(parent=pylint_sw)
pylint_widget.filecombo.clear()
# Change the max_entry to 2
pylint_widget.parent.set_option('max_entries', 2)
assert pylint_widget.parent.get_option('max_entries') == 2
# Call to set_filename
pylint_widget.set_filename(filename=pylint_test_script)
assert pylint_widget.filecombo.count() == 1
# Add to more filenames
script_1, script_2 = pylint_test_scripts
pylint_widget.set_filename(filename=script_1)
pylint_widget.set_filename(filename=script_2)
assert pylint_widget.filecombo.count() == 2
assert 'test_script_1.py' in pylint_widget.curr_filenames[0]
assert 'test_script_2.py' in pylint_widget.curr_filenames[1]
|
def test_pylint_max_history_conf(
pylint_test_script, pylint_test_scripts, mocker, qtbot):
"""Regression test for checking max_entries configuration.
For further information see spyder-ide/spyder#12884
"""
# Create the pylint widget for code analysis
main_window = MainWindowMock()
main_window.projects.get_active_project_path = mocker.MagicMock(
return_value=None)
pylint_sw = Pylint(parent=main_window)
pylint_widget = PylintWidget(parent=pylint_sw)
pylint_widget.filecombo.clear()
# Change the max_entry to 2
pylint_widget.parent.set_option('max_entries', 2)
assert pylint_widget.parent.get_option('max_entries') == 2
# Call to set_filename
pylint_widget.set_filename(filename=pylint_test_script)
assert pylint_widget.filecombo.count() == 1
# Add to more filenames
script_1, script_2 = pylint_test_scripts
pylint_widget.set_filename(filename=script_1)
pylint_widget.set_filename(filename=script_2)
assert pylint_widget.filecombo.count() == 2
assert 'test_script_1.py' in pylint_widget.curr_filenames[0]
assert 'test_script_2.py' in pylint_widget.curr_filenames[1]
|
35,552 |
def read_sensor_events(duration_sec):
sensor_events = messaging.sub_sock("sensorEvents", timeout=0.1)
start_time_sec = time.time()
events = []
while time.time() - start_time_sec < duration_sec:
events += messaging.drain_sock(sensor_events)
time.sleep(0.01)
return events
|
def read_sensor_events(duration_sec):
sensor_events = messaging.sub_sock("sensorEvents", timeout=0.1)
start_time_sec = time.monotonic()
events = []
while time.monotonic() - start_time_sec < duration_sec:
events += messaging.drain_sock(sensor_events)
time.sleep(0.01)
return events
|
25,924 |
def start_powershell_process(ps_process_cmd):
import subprocess
return subprocess.call(['powershell.exe', '-Command', 'Start-Process {}'.format(ps_process_cmd)])
|
def powershell_start_process(ps_process_cmd):
import subprocess
return subprocess.call(['powershell.exe', '-Command', 'Start-Process {}'.format(ps_process_cmd)])
|
818 |
def test_issue_14567():
assert str(factorial(Sum(-1, (x, 0, 0))) + y) == 'y + factorial(Sum(-1, (x, 0, 0)))'
|
def test_issue_14567():
assert factorial(Sum(-1, (x, 0, 0))) + y # doesn't raise an error
|
30,824 |
def rawToDict(raw):
result = {} # type: Dict[str, str]
try:
result = json.loads(raw)
except ValueError:
if 'message' in raw:
raw = raw.replace('"', '').strip('{').strip('}')
key_val_arr = raw.split(",")
for key_val in key_val_arr:
single_key_val = key_val.split(":")
if len(single_key_val) > 1:
val = single_key_val[1]
key = single_key_val[0].strip()
if key in result.keys():
result[key] = result[key] + "," + val
else:
result[key] = val
else:
raw_response = re.split('(?<=\S),', raw)  # split on commas preceded by a non-whitespace character
for key_val in raw_response:
key_value = key_val.replace('"', '').strip()
if '=' in key_value:
key_and_val = key_value.split('=', 1)
result[key_and_val[0]] = key_and_val[1]
if REPLACE_FLAG:
result = replace_keys(result)
return result
|
def rawToDict(raw):
result = {} # type: Dict[str, str]
try:
result = json.loads(raw)
except ValueError:
if 'message' in raw:
raw = raw.replace('"', '').strip('{').strip('}')
key_val_arr = raw.split(",")
for key_val in key_val_arr:
single_key_val = key_val.split(":", 1)
if len(single_key_val) > 1:
val = single_key_val[1]
key = single_key_val[0].strip()
if key in result.keys():
result[key] = result[key] + "," + val
else:
result[key] = val
else:
raw_response = re.split('(?<=\S),', raw)  # split on commas preceded by a non-whitespace character
for key_val in raw_response:
key_value = key_val.replace('"', '').strip()
if '=' in key_value:
key_and_val = key_value.split('=', 1)
result[key_and_val[0]] = key_and_val[1]
if REPLACE_FLAG:
result = replace_keys(result)
return result
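# --- Standalone sketch of the key=value branch above: splitting a raw,
# comma-separated log line (sample data is made up) ---
import re

raw = 'src=10.0.0.1, dst=10.0.0.2, msg=Accept traffic'
parsed = {}
for key_val in re.split(r'(?<=\S),', raw):     # split on commas preceded by non-whitespace
    key_value = key_val.replace('"', '').strip()
    if '=' in key_value:
        key, val = key_value.split('=', 1)
        parsed[key] = val
print(parsed)   # {'src': '10.0.0.1', 'dst': '10.0.0.2', 'msg': 'Accept traffic'}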
|
58,826 |
def dotc(x, y, out=None):
"""Computes the dot product of x.conj() and y."""
dtype = x.dtype.char
if dtype in 'fd':
return dot(x, y, out=out)
elif dtype == 'F':
func = cublas.cdotc
elif dtype == 'D':
func = cublas.zdotc
else:
raise TypeError('invalid dtype')
_check_two_vectors(x, y)
handle = device.get_cublas_handle()
result_dtype = dtype
result_ptr, result, mode = _setup_result_ptr(handle, out, result_dtype)
func(handle, x.size, x.data.ptr, 1, y.data.ptr, 1, result_ptr)
cublas.setPointerMode(handle, mode)
if out is None:
out = result
elif out.dtype != result_dtype:
out[...] = result
return out
|
def dotc(x, y, out=None):
"""Computes the dot product of x.conj() and y."""
dtype = x.dtype.char
if dtype in 'fd':
return dot(x, y, out=out)
elif dtype == 'F':
func = cublas.cdotc
elif dtype == 'D':
func = cublas.zdotc
else:
raise TypeError('invalid dtype')
_check_two_vectors(x, y)
handle = device.get_cublas_handle()
result_dtype = dtype
result_ptr, result, orig_mode = _setup_result_ptr(handle, out, result_dtype)
func(handle, x.size, x.data.ptr, 1, y.data.ptr, 1, result_ptr)
cublas.setPointerMode(handle, orig_mode)
if out is None:
out = result
elif out.dtype != result_dtype:
out[...] = result
return out
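# --- NumPy sketch of what dotc computes (the cuBLAS path above performs the
# same conjugated inner product on the GPU) ---
import numpy as np

x = np.array([1 + 2j, 3 - 1j], dtype=np.complex64)
y = np.array([2 + 0j, 1 + 1j], dtype=np.complex64)
print(np.vdot(x, y))          # np.vdot conjugates its first argument: sum(x.conj() * y)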
|
10,905 |
def run_hook(label, hooks, pre_step_hook=False, post_step_hook=False, args=None, msg=None):
"""
Run hook with specified label and return result of hook or None.
:param label: name of hook
:param hooks: list of defined hooks
:param pre_step_hook: indicates whether hook to run is a pre-step hook
:param post_step_hook: indicates whether hook to run is a post-step hook
:param args: arguments to pass to hook function
:param msg: custom message that is printed when hook is called
"""
hook = find_hook(label, hooks, pre_step_hook=pre_step_hook, post_step_hook=post_step_hook)
res = None
if hook:
if args is None:
args = []
if pre_step_hook:
label = 'pre-' + label
elif post_step_hook:
label = 'post-' + label
if msg is None:
msg = "Running %s hook..." % label
print_msg(msg)
_log.info("Running '%s' hook function (arguments: %s)...", hook.__name__, args)
res = hook(*args)
return res
|
def run_hook(label, hooks, pre_step_hook=False, post_step_hook=False, args=None, msg=None):
"""
Run hook with specified label and return result of hook() or None.
:param label: name of hook
:param hooks: list of defined hooks
:param pre_step_hook: indicates whether hook to run is a pre-step hook
:param post_step_hook: indicates whether hook to run is a post-step hook
:param args: arguments to pass to hook function
:param msg: custom message that is printed when hook is called
"""
hook = find_hook(label, hooks, pre_step_hook=pre_step_hook, post_step_hook=post_step_hook)
res = None
if hook:
if args is None:
args = []
if pre_step_hook:
label = 'pre-' + label
elif post_step_hook:
label = 'post-' + label
if msg is None:
msg = "Running %s hook..." % label
print_msg(msg)
_log.info("Running '%s' hook function (arguments: %s)...", hook.__name__, args)
res = hook(*args)
return res
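# --- Minimal sketch of the hook calling convention (standalone; the hook
# name and arguments are made up, and the module's find_hook/print_msg
# helpers are not used here) ---
def start_hook(*args):
    print('start hook called with', args)
    return 'hook-result'

hooks = [start_hook]   # as if loaded from a hooks file
hook = next((h for h in hooks if h.__name__ == 'start_hook'), None)
if hook:
    res = hook('demo-software')   # mirrors "res = hook(*args)" above
    print(res)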
|
37,368 |
def level_0_pass_manager(pass_manager_config: PassManagerConfig) -> PassManager:
"""Level 0 pass manager: no explicit optimization other than mapping to backend.
This pass manager applies the user-given initial layout. If none is given, a trivial
layout consisting of mapping the i-th virtual qubit to the i-th physical qubit is used.
Any unused physical qubit is allocated as ancilla space.
The pass manager then unrolls the circuit to the desired basis, and transforms the
circuit to match the coupling map.
Note:
In simulators where ``coupling_map=None``, only the unrolling and
optimization stages are done.
Args:
pass_manager_config: configuration of the pass manager.
Returns:
a level 0 pass manager.
Raises:
TranspilerError: if the passmanager config is invalid.
"""
basis_gates = pass_manager_config.basis_gates
coupling_map = pass_manager_config.coupling_map
initial_layout = pass_manager_config.initial_layout
layout_method = pass_manager_config.layout_method or 'trivial'
routing_method = pass_manager_config.routing_method or 'stochastic'
translation_method = pass_manager_config.translation_method or 'translator'
scheduling_method = pass_manager_config.scheduling_method
instruction_durations = pass_manager_config.instruction_durations
seed_transpiler = pass_manager_config.seed_transpiler
backend_properties = pass_manager_config.backend_properties
# 1. Choose an initial layout if not set by user (default: trivial layout)
_given_layout = SetLayout(initial_layout)
def _choose_layout_condition(property_set):
return not property_set['layout']
if layout_method == 'trivial':
_choose_layout = TrivialLayout(coupling_map)
elif layout_method == 'dense':
_choose_layout = DenseLayout(coupling_map, backend_properties)
elif layout_method == 'noise_adaptive':
_choose_layout = NoiseAdaptiveLayout(backend_properties)
elif layout_method == 'sabre':
_choose_layout = SabreLayout(coupling_map, max_iterations=1, seed=seed_transpiler)
else:
raise TranspilerError("Invalid layout method %s." % layout_method)
# 2. Extend dag/layout with ancillas using the full coupling map
_embed = [FullAncillaAllocation(coupling_map), EnlargeWithAncilla(), ApplyLayout()]
# 3. Decompose so only 1-qubit and 2-qubit gates remain
_unroll3q = Unroll3qOrMore()
# 4. Swap to fit the coupling map
_swap_check = CheckMap(coupling_map)
def _swap_condition(property_set):
return not property_set['is_swap_mapped']
_swap = [BarrierBeforeFinalMeasurements()]
if routing_method == 'basic':
_swap += [BasicSwap(coupling_map)]
elif routing_method == 'stochastic':
_swap += [StochasticSwap(coupling_map, trials=20, seed=seed_transpiler)]
elif routing_method == 'lookahead':
_swap += [LookaheadSwap(coupling_map, search_depth=2, search_width=2)]
elif routing_method == 'sabre':
_swap += [SabreSwap(coupling_map, heuristic='basic', seed=seed_transpiler)]
elif routing_method == 'none':
_swap += [Error(msg='No routing method applied, but it is needed. CheckMap Error: '
'{check_map_msg}', action='raise')]
else:
raise TranspilerError("Invalid routing method %s." % routing_method)
# 5. Unroll to the basis
if translation_method == 'unroller':
_unroll = [Unroller(basis_gates)]
elif translation_method == 'translator':
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
_unroll = [UnrollCustomDefinitions(sel, basis_gates),
BasisTranslator(sel, basis_gates)]
elif translation_method == 'synthesis':
_unroll = [
Unroll3qOrMore(),
Collect2qBlocks(),
ConsolidateBlocks(basis_gates=basis_gates),
UnitarySynthesis(basis_gates),
]
else:
raise TranspilerError("Invalid translation method %s." % translation_method)
# 6. Fix any bad CX directions
_direction_check = [CheckCXDirection(coupling_map)]
def _direction_condition(property_set):
return not property_set['is_direction_mapped']
_direction = [CXDirection(coupling_map)]
# 7. Schedule the circuit only when scheduling_method is supplied
if scheduling_method:
_scheduling = [TimeUnitAnalysis(instruction_durations)]
if scheduling_method in {'alap', 'as_late_as_possible'}:
_scheduling += [ALAPSchedule(instruction_durations)]
elif scheduling_method in {'asap', 'as_soon_as_possible'}:
_scheduling += [ASAPSchedule(instruction_durations)]
else:
raise TranspilerError("Invalid scheduling method %s." % scheduling_method)
# Build pass manager
pm0 = PassManager()
if coupling_map:
pm0.append(_given_layout)
pm0.append(_choose_layout, condition=_choose_layout_condition)
pm0.append(_embed)
pm0.append(_unroll3q)
pm0.append(_swap_check)
pm0.append(_swap, condition=_swap_condition)
pm0.append(_unroll)
if coupling_map and not coupling_map.is_symmetric:
pm0.append(_direction_check)
pm0.append(_direction, condition=_direction_condition)
if scheduling_method:
pm0.append(_scheduling)
return pm0
|
def level_0_pass_manager(pass_manager_config: PassManagerConfig) -> PassManager:
"""Level 0 pass manager: no explicit optimization other than mapping to backend.
This pass manager applies the user-given initial layout. If none is given, a trivial
layout consisting of mapping the i-th virtual qubit to the i-th physical qubit is used.
Any unused physical qubit is allocated as ancilla space.
The pass manager then unrolls the circuit to the desired basis, and transforms the
circuit to match the coupling map.
Note:
In simulators where ``coupling_map=None``, only the unrolling and
optimization stages are done.
Args:
pass_manager_config: configuration of the pass manager.
Returns:
a level 0 pass manager.
Raises:
TranspilerError: if the passmanager config is invalid.
"""
basis_gates = pass_manager_config.basis_gates
coupling_map = pass_manager_config.coupling_map
initial_layout = pass_manager_config.initial_layout
layout_method = pass_manager_config.layout_method or 'trivial'
routing_method = pass_manager_config.routing_method or 'stochastic'
translation_method = pass_manager_config.translation_method or 'translator'
scheduling_method = pass_manager_config.scheduling_method
instruction_durations = pass_manager_config.instruction_durations
seed_transpiler = pass_manager_config.seed_transpiler
backend_properties = pass_manager_config.backend_properties
# 1. Choose an initial layout if not set by user (default: trivial layout)
_given_layout = SetLayout(initial_layout)
def _choose_layout_condition(property_set):
return not property_set['layout']
if layout_method == 'trivial':
_choose_layout = TrivialLayout(coupling_map)
elif layout_method == 'dense':
_choose_layout = DenseLayout(coupling_map, backend_properties)
elif layout_method == 'noise_adaptive':
_choose_layout = NoiseAdaptiveLayout(backend_properties)
elif layout_method == 'sabre':
_choose_layout = SabreLayout(coupling_map, max_iterations=1, seed=seed_transpiler)
else:
raise TranspilerError("Invalid layout method %s." % layout_method)
# 2. Extend dag/layout with ancillas using the full coupling map
_embed = [FullAncillaAllocation(coupling_map), EnlargeWithAncilla(), ApplyLayout()]
# 3. Decompose so only 1-qubit and 2-qubit gates remain
_unroll3q = Unroll3qOrMore()
# 4. Swap to fit the coupling map
_swap_check = CheckMap(coupling_map)
def _swap_condition(property_set):
return not property_set['is_swap_mapped']
_swap = [BarrierBeforeFinalMeasurements()]
if routing_method == 'basic':
_swap += [BasicSwap(coupling_map)]
elif routing_method == 'stochastic':
_swap += [StochasticSwap(coupling_map, trials=20, seed=seed_transpiler)]
elif routing_method == 'lookahead':
_swap += [LookaheadSwap(coupling_map, search_depth=2, search_width=2)]
elif routing_method == 'sabre':
_swap += [SabreSwap(coupling_map, heuristic='basic', seed=seed_transpiler)]
elif routing_method == 'none':
_swap += [Error(msg='No routing method selected, but circuit is not routed to device. CheckMap Error: '
'{check_map_msg}', action='raise')]
else:
raise TranspilerError("Invalid routing method %s." % routing_method)
# 5. Unroll to the basis
if translation_method == 'unroller':
_unroll = [Unroller(basis_gates)]
elif translation_method == 'translator':
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
_unroll = [UnrollCustomDefinitions(sel, basis_gates),
BasisTranslator(sel, basis_gates)]
elif translation_method == 'synthesis':
_unroll = [
Unroll3qOrMore(),
Collect2qBlocks(),
ConsolidateBlocks(basis_gates=basis_gates),
UnitarySynthesis(basis_gates),
]
else:
raise TranspilerError("Invalid translation method %s." % translation_method)
# 6. Fix any bad CX directions
_direction_check = [CheckCXDirection(coupling_map)]
def _direction_condition(property_set):
return not property_set['is_direction_mapped']
_direction = [CXDirection(coupling_map)]
# 7. Schedule the circuit only when scheduling_method is supplied
if scheduling_method:
_scheduling = [TimeUnitAnalysis(instruction_durations)]
if scheduling_method in {'alap', 'as_late_as_possible'}:
_scheduling += [ALAPSchedule(instruction_durations)]
elif scheduling_method in {'asap', 'as_soon_as_possible'}:
_scheduling += [ASAPSchedule(instruction_durations)]
else:
raise TranspilerError("Invalid scheduling method %s." % scheduling_method)
# Build pass manager
pm0 = PassManager()
if coupling_map:
pm0.append(_given_layout)
pm0.append(_choose_layout, condition=_choose_layout_condition)
pm0.append(_embed)
pm0.append(_unroll3q)
pm0.append(_swap_check)
pm0.append(_swap, condition=_swap_condition)
pm0.append(_unroll)
if coupling_map and not coupling_map.is_symmetric:
pm0.append(_direction_check)
pm0.append(_direction, condition=_direction_condition)
if scheduling_method:
pm0.append(_scheduling)
return pm0
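# --- Usage sketch (assumes a qiskit-terra release of this era is installed):
# build a PassManagerConfig with a basis and a 3-qubit linear coupling map,
# then run the level-0 pass manager on a small circuit ---
from qiskit import QuantumCircuit
from qiskit.transpiler import CouplingMap, PassManagerConfig
from qiskit.transpiler.preset_passmanagers import level_0_pass_manager

config = PassManagerConfig(basis_gates=['u3', 'cx'],
                           coupling_map=CouplingMap([[0, 1], [1, 2]]))
pm0 = level_0_pass_manager(config)

qc = QuantumCircuit(3)
qc.h(0)
qc.cx(0, 2)                 # qubits 0 and 2 are not adjacent, so routing inserts swaps
mapped = pm0.run(qc)
print(mapped.count_ops())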
|
5,898 |
def _with_legacy_tags(standard_tags, interpreter, impl):
# type: (List[Tag], str, Optional[str]) -> List[Tag]
"""For backwards compatibilty, add legacy tags that pip used to accept"""
all_supported_tags = standard_tags # Default to not making any changes
# pip used to calculate incorrect implementation tags for alternate
# implementations like PyPy, appending part of the implementation version,
# rather than using the nominal Python language version
legacy_interpreter = _get_custom_interpreter(impl)
if interpreter != legacy_interpreter:
all_supported_tags = [] # Build new list with extra tags inserted
for tag in standard_tags:
all_supported_tags.append(tag)
if tag.interpreter == interpreter:
legacy_tag = Tag(legacy_interpreter, tag.abi, tag.platform)
all_supported_tags.append(legacy_tag)
return all_supported_tags
|
def _with_legacy_tags(standard_tags, interpreter, impl):
# type: (List[Tag], str, Optional[str]) -> List[Tag]
"""For backwards compatibilty, add legacy tags that pip used to accept"""
all_supported_tags = standard_tags[:] # Default to not making any changes
# pip used to calculate incorrect implementation tags for alternate
# implementations like PyPy, appending part of the implementation version,
# rather than using the nominal Python language version
legacy_interpreter = _get_custom_interpreter(impl)
if interpreter != legacy_interpreter:
all_supported_tags = [] # Build new list with extra tags inserted
for tag in standard_tags:
all_supported_tags.append(tag)
if tag.interpreter == interpreter:
legacy_tag = Tag(legacy_interpreter, tag.abi, tag.platform)
all_supported_tags.append(legacy_tag)
return all_supported_tags
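# --- Illustration of the legacy-tag insertion using packaging.tags.Tag
# (the tag strings are made up; real values come from pip's tag computation) ---
from packaging.tags import Tag

standard_tags = [Tag('pp36', 'pypy36_pp73', 'manylinux2010_x86_64')]
interpreter, legacy_interpreter = 'pp36', 'pp373'
out = []
for tag in standard_tags:
    out.append(tag)
    if tag.interpreter == interpreter:
        out.append(Tag(legacy_interpreter, tag.abi, tag.platform))
print(out)   # standard tag followed by its legacy twin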
|
30,590 |
def main():
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
''' COMMANDS + REQUESTS FUNCTIONS '''
command = demisto.command()
applianceurl = demisto.params().get('applianceurl')
username = demisto.params().get('credentials').get('identifier')
password = demisto.params().get('credentials').get('password')
insecure = demisto.params().get('insecure', True)
LOG(f'Command being called is {demisto.command()}')
if command == 'test-module':
"""
Returning 'ok' indicates that the user can log in to EndaceProbe successfully with the provided credentials.
Returns:
'ok' if test passed, anything else will fail the test
"""
demisto.results(endace_test_command(applianceurl, username, password, insecure))
else:
""" Command Modules """
if command is None:
raise NotImplementedError(f'Command "{command}" is not implemented.')
else:
app = EndaceApp(applianceurl, username, password, insecure)
if command == "endace-create-search":
if demisto.args():
return_outputs(*endace_create_search_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-get-search-status":
if demisto.args():
return_outputs(*endace_get_search_status_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-delete-search-task":
if demisto.args():
return_outputs(*endace_delete_search_task_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-create-archive":
if demisto.args():
return_outputs(*endace_create_archive_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-get-archive-status":
if demisto.args():
return_outputs(*endace_get_archive_status_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-delete-archive-task":
if demisto.args():
return_outputs(*endace_delete_archive_task_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-delete-archived-file":
if demisto.args():
return_outputs(*endace_delete_archived_file_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-download-pcap":
if demisto.args():
return_outputs(*endace_download_pcap_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
else:
return_error(f'Command {demisto.command()} does not exist')
|
def main():
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
''' COMMANDS + REQUESTS FUNCTIONS '''
command = demisto.command()
applianceurl = demisto.params().get('applianceurl')
username = demisto.params().get('credentials').get('identifier')
password = demisto.params().get('credentials').get('password')
insecure = not demisto.params().get('insecure', False)
LOG(f'Command being called is {demisto.command()}')
if command == 'test-module':
"""
Returning 'ok' indicates that the user can log in to EndaceProbe successfully with the provided credentials.
Returns:
'ok' if test passed, anything else will fail the test
"""
demisto.results(endace_test_command(applianceurl, username, password, insecure))
else:
""" Command Modules """
if command is None:
raise NotImplementedError(f'Command "{command}" is not implemented.')
else:
app = EndaceApp(applianceurl, username, password, insecure)
if command == "endace-create-search":
if demisto.args():
return_outputs(*endace_create_search_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-get-search-status":
if demisto.args():
return_outputs(*endace_get_search_status_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-delete-search-task":
if demisto.args():
return_outputs(*endace_delete_search_task_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-create-archive":
if demisto.args():
return_outputs(*endace_create_archive_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-get-archive-status":
if demisto.args():
return_outputs(*endace_get_archive_status_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-delete-archive-task":
if demisto.args():
return_outputs(*endace_delete_archive_task_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-delete-archived-file":
if demisto.args():
return_outputs(*endace_delete_archived_file_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
elif command == "endace-download-pcap":
if demisto.args():
return_outputs(*endace_download_pcap_command(app, demisto.args()))
else:
raise ValueError('No function arguments')
else:
return_error(f'Command {demisto.command()} does not exist')
|
16,372 |
def get_sensor_types(device_sub_type: str) -> dict[str, Any]:
"""Return the proper info array for the device type."""
if "HobbyBoard" in device_sub_type:
return HOBBYBOARD_EF
return DEVICE_BINARY_SENSORS
|
def get_sensor_types(device_sub_type: str) -> dict[str, tuple[OneWireBinarySensorEntityDescription, ...]]:
"""Return the proper info array for the device type."""
if "HobbyBoard" in device_sub_type:
return HOBBYBOARD_EF
return DEVICE_BINARY_SENSORS
|
24,533 |
def test_different_input_types():
# Define some constants
wavelengths = np.arange(520, 545, 0.01) * u.nm
probe_wavelength = 532 * u.nm
n = 5e17 * u.cm ** -3
probe_vec = np.array([1, 0, 0])
scatter_vec = np.array([0, 1, 0])
ifract = np.array([1.0])
Te = np.array([10]) * u.eV
Ti = np.array([10]) * u.eV
ion_species = "C-12 5+"
# Raise a ValueError with inconsistent ion array lengths
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=np.array([0.5, 0.5]),
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Raise a ValueError with inconsistent ion temperature array
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
np.array([5, 5]) * u.eV,
ifract=ifract,
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Raise a ValueError with empty ion_species
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=ifract,
ion_species=[],
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Raise a Value Error with inconsistent electron array lengths
# Te.size != efract.size
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
np.array([1, 10]) * u.eV,
Ti,
efract=np.array([0.5, 0.2, 0.3]),
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Electron vel shape not compatible with efract.size
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
efract=np.array([0.5, 0.5]),
electron_vel=np.array([[100, 0, 0]]) * u.km / u.s,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
|
def test_different_input_types():
# Define some constants
wavelengths = np.arange(520, 545, 0.01) * u.nm
probe_wavelength = 532 * u.nm
n = 5e17 * u.cm ** -3
probe_vec = np.array([1, 0, 0])
scatter_vec = np.array([0, 1, 0])
ifract = np.array([1.0])
Te = np.array([10]) * u.eV
Ti = np.array([10]) * u.eV
ion_species = "C-12 5+"
# Raise a ValueError with inconsistent ion array lengths
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=np.array([0.5, 0.5]),
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Raise a ValueError with inconsistent ion temperature array
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
np.array([5, 5]) * u.eV,
ifract=ifract,
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Raise a ValueError with empty ion_species
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=ifract,
ion_species=[],
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Raise a Value Error with inconsistent electron array lengths
# Te.size != efract.size
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
np.array([1, 10]) * u.eV,
Ti,
efract=np.array([0.5, 0.2, 0.3]),
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Electron vel shape not compatible with efract.size
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
efract=np.array([0.5, 0.5]),
electron_vel=np.array([[100, 0, 0]]) * u.km / u.s,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
|
43,178 |
def cached_cast(cast_fn, x, cache):
if is_nested(x):
return type(x)([cached_cast(cast_fn, y, cache) for y in x])
if x in cache:
cached_x = cache[x]
next_functions_available = False
if x.requires_grad and cached_x.requires_grad:
if len(cached_x.grad_fn.next_functions) > 1:
next_functions_available = True
# Make sure x is actually cached_x's autograd parent.
if next_functions_available and cached_x.grad_fn.next_functions[1][0].variable is not x:
raise RuntimeError("x and cache[x] both require grad, but x is not "
"cache[x]'s parent. This is likely an error.")
# During eval, it's possible to end up caching casted weights with
# requires_grad=False. On the next training iter, if cached_x is found
# and reused from the cache, it will not actually have x as its parent.
# Therefore, we choose to invalidate the cache (and force refreshing the cast)
# if x.requires_grad and cached_x.requires_grad do not match.
#
# During eval (i.e. running under with torch.no_grad()) the invalidation
# check would cause the cached value to be dropped every time, because
# cached_x would always be created with requires_grad=False, while x would
# still have requires_grad=True. This would render the cache effectively
# useless during eval. Therefore, if we are running under the no_grad()
# context manager (torch.is_grad_enabled=False) we elide the invalidation
# check, and use the cached value even though its requires_grad flag doesn't
# match. During eval, we don't care that there's no autograd-graph
# connection between x and cached_x.
if torch.is_grad_enabled() and x.requires_grad != cached_x.requires_grad:
del cache[x]
elif x.requires_grad and cached_x.requires_grad and not next_functions_available:
del cache[x]
else:
return cached_x
casted_x = cast_fn(x)
cache[x] = casted_x
return casted_x
|
def cached_cast(cast_fn, x, cache):
if is_nested(x):
        return type(x)([cached_cast(cast_fn, y, cache) for y in x])
if x in cache:
cached_x = cache[x]
next_functions_available = False
if x.requires_grad and cached_x.requires_grad:
if len(cached_x.grad_fn.next_functions) > 1:
next_functions_available = True
# Make sure x is actually cached_x's autograd parent.
if len(cached_x.grad_fn.next_functions) > 1 and cached_x.grad_fn.next_functions[1][0].variable is not x:
raise RuntimeError("x and cache[x] both require grad, but x is not "
"cache[x]'s parent. This is likely an error.")
# During eval, it's possible to end up caching casted weights with
# requires_grad=False. On the next training iter, if cached_x is found
# and reused from the cache, it will not actually have x as its parent.
# Therefore, we choose to invalidate the cache (and force refreshing the cast)
# if x.requires_grad and cached_x.requires_grad do not match.
#
# During eval (i.e. running under with torch.no_grad()) the invalidation
# check would cause the cached value to be dropped every time, because
# cached_x would always be created with requires_grad=False, while x would
# still have requires_grad=True. This would render the cache effectively
# useless during eval. Therefore, if we are running under the no_grad()
# context manager (torch.is_grad_enabled=False) we elide the invalidation
# check, and use the cached value even though its requires_grad flag doesn't
# match. During eval, we don't care that there's no autograd-graph
# connection between x and cached_x.
if torch.is_grad_enabled() and x.requires_grad != cached_x.requires_grad:
del cache[x]
elif x.requires_grad and cached_x.requires_grad and not next_functions_available:
del cache[x]
else:
return cached_x
casted_x = cast_fn(x)
cache[x] = casted_x
return casted_x
|
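As a small usage sketch for the caching helper above (assuming everything is pasted into one module; the half-precision cast and the throwaway is_nested stand-in are illustrative, not part of the original code):

import torch

def is_nested(x):
    # Minimal stand-in for the helper cached_cast expects: treat lists and
    # tuples as nested containers of tensors.
    return isinstance(x, (list, tuple))

layer = torch.nn.Linear(4, 4)
cast_cache = {}  # keyed by the original fp32 parameter tensors

# Under no_grad (the "eval" path discussed in the comments above) the cached
# fp16 copy is reused instead of being re-cast on every call.
with torch.no_grad():
    w1 = cached_cast(lambda t: t.half(), layer.weight, cast_cache)
    w2 = cached_cast(lambda t: t.half(), layer.weight, cast_cache)
assert w1 is w2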
34,025 |
def push_error_to_driver(worker, error_type: str, message: str, job_id=None):
"""Push an error message to the driver to be printed in the background.
Args:
worker: The worker to use.
error_type: The type of the error.
message: The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
worker.core_worker.push_error(job_id, error_type, message, time.time())
|
def push_error_to_driver(worker, error_type: str, message: str, job_id: Optional[str] = None):
"""Push an error message to the driver to be printed in the background.
Args:
worker: The worker to use.
error_type: The type of the error.
message: The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
worker.core_worker.push_error(job_id, error_type, message, time.time())
|
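A hedged sketch of how this helper is driven; the global_worker lookup path is an assumption, since it has moved between ray.worker and ray._private.worker across Ray releases:

import ray

ray.init()

# Version-dependent, illustrative lookup of the process-wide worker object.
worker = ray._private.worker.global_worker

# job_id=None broadcasts the message to all connected drivers.
push_error_to_driver(worker, error_type="custom_warning",
                     message="something looked off in a background task")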
20,477 |
def fill_employee_contract_id(env):
contract = env['hr.contract']
employees = env['hr.employee'].search([])
for employee in employees:
employee.contract_id = contract.search([('employee_id', '=', employee.id)], order='date_start desc', limit=1)
|
def fill_employee_contract_id(env):
contract = env['hr.contract']
employees = env['hr.employee'].search([])
for employee in employees:
employee.contract_id = contract.search([('employee_id', '=', employee.id), ('company_id', '=', employee.company_id.id)], order='date_start desc', limit=1)
|
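For context, a sketch of where a backfill helper like this normally runs: an Odoo post-migration script that builds an environment on the migration cursor (the file path is hypothetical and the boilerplate assumes the standard migration hook signature):

# migrations/<version>/post-migration.py  (hypothetical path)
from odoo import SUPERUSER_ID, api

def migrate(cr, version):
    # Build an environment and backfill the employee -> latest contract link.
    env = api.Environment(cr, SUPERUSER_ID, {})
    fill_employee_contract_id(env)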
48,931 |
def to_graph(expr, node_attr=None, edge_attr=None):
graph = ibis.util.to_op_dag(expr.op())
g = gv.Digraph(
node_attr=node_attr or DEFAULT_NODE_ATTRS, edge_attr=edge_attr or {}
)
g.attr(rankdir='BT')
seen = set()
edges = set()
for v, us in graph.items():
if isinstance(v, ops.NodeList) and not v:
continue
vhash = str(hash(v))
if v not in seen:
g.node(vhash, label=get_label(v))
seen.add(v)
for u in us:
if isinstance(u, ops.NodeList) and not u:
continue
uhash = str(hash(u))
if u not in seen:
g.node(uhash, label=get_label(u))
seen.add(u)
if (edge := (u, v)) not in edges:
g.edge(uhash, vhash)
edges.add(edge)
return g
|
def to_graph(expr, node_attr=None, edge_attr=None):
graph = ibis.util.to_op_dag(expr.op())
g = gv.Digraph(
node_attr=node_attr or DEFAULT_NODE_ATTRS, edge_attr=edge_attr or {}
)
g.attr(rankdir='BT')
seen = set()
edges = set()
for v, us in graph.items():
if isinstance(v, ops.NodeList) and not v:
continue
vhash = str(hash(v))
if v not in seen:
g.node(vhash, label=get_label(v))
seen.add(v)
for u in us:
if isinstance(u, ops.NodeList) and not u:
continue
uhash = str(hash(u))
if u not in seen:
g.node(uhash, label=get_label(u))
seen.add(u)
if (edge := (u, v)) not in edges:
arg_name = None
if not isinstance(v, ops.NodeList):
arg_name = v.argnames[v.args.index(u)]
g.edge(uhash, vhash, label=arg_name)
edges.add(edge)
return g
|
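A short usage sketch, assuming an ibis version that provides ibis.util.to_op_dag as used above and the module-level names (gv, ops, get_label, DEFAULT_NODE_ATTRS) in scope; the table definition is illustrative:

import ibis

# Build a small expression and render its operation DAG; with the modified
# version above, each edge is labelled with the argument name it feeds.
t = ibis.table([("a", "int64"), ("b", "string")], name="t")
expr = t[t.a > 0].group_by("b").aggregate(total=t.a.sum())

g = to_graph(expr)
g.render("expr_dag", format="svg", cleanup=True)  # writes expr_dag.svg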
22,781 |
def prepare_and_parse_args(plugins, args, detect_defaults=False):
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
# This is for developers to set the level in the cli.ini, and overrides
# the --verbose flag
helpful.add(
None, "--verbose-level", dest="verbose_level",
default=flag_default("verbose_level"), help=argparse.SUPPRESS)
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
None, "--preconfigured-renewal", dest="preconfigured_renewal",
action="store_true", default=flag_default("preconfigured_renewal"),
help=argparse.SUPPRESS
)
helpful.add(
None, "--timeout", type=int,
dest="timeout",
default=flag_default("timeout"),help=config_help("timeout"))
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because you will be "
"unable to receive notice about impending expiration or "
"revocation of your certificates or problems with your Certbot "
"installation that will lead to failure to renew.")
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
"automation", "--no-reuse-key", dest="reuse_key",
action="store_false", default=flag_default("reuse_key"),
help="When renewing, do not use the same private key as the existing "
"certificate. Not reusing private keys is the default behavior of "
"Certbot. This option may be used to unset --reuse-key on an "
"existing certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--key-type", choices=['rsa', 'ecdsa'], type=str,
default=flag_default("key_type"), help=config_help("key_type"))
helpful.add(
"security", "--elliptic-curve", type=str, choices=[
'secp256r1',
'secp384r1',
'secp521r1',
], metavar="N",
default=flag_default("elliptic_curve"), help=config_help("elliptic_curve"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
[None, "certonly", "renew", "run"],
"--preferred-chain", dest="preferred_chain",
default=flag_default("preferred_chain"), help=config_help("preferred_chain")
)
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma delimited list of the preferred challenge to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
' www.example.com"')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
# Deprecated arguments
helpful.add_deprecated_argument("--os-packages-only", 0)
helpful.add_deprecated_argument("--no-self-upgrade", 0)
helpful.add_deprecated_argument("--no-bootstrap", 0)
helpful.add_deprecated_argument("--no-permissions-check", 0)
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
|
def prepare_and_parse_args(plugins, args, detect_defaults=False):
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
# This is for developers to set the level in the cli.ini, and overrides
# the --verbose flag
helpful.add(
None, "--verbose-level", dest="verbose_level",
default=flag_default("verbose_level"), help=argparse.SUPPRESS)
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
None, "--preconfigured-renewal", dest="preconfigured_renewal",
action="store_true", default=flag_default("preconfigured_renewal"),
help=argparse.SUPPRESS
)
helpful.add(
None, "--timeout", type=int,
dest="timeout",
default=flag_default("timeout"), help=config_help("timeout"))
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because you will be "
"unable to receive notice about impending expiration or "
"revocation of your certificates or problems with your Certbot "
"installation that will lead to failure to renew.")
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
"automation", "--no-reuse-key", dest="reuse_key",
action="store_false", default=flag_default("reuse_key"),
help="When renewing, do not use the same private key as the existing "
"certificate. Not reusing private keys is the default behavior of "
"Certbot. This option may be used to unset --reuse-key on an "
"existing certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--key-type", choices=['rsa', 'ecdsa'], type=str,
default=flag_default("key_type"), help=config_help("key_type"))
helpful.add(
"security", "--elliptic-curve", type=str, choices=[
'secp256r1',
'secp384r1',
'secp521r1',
], metavar="N",
default=flag_default("elliptic_curve"), help=config_help("elliptic_curve"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
[None, "certonly", "renew", "run"],
"--preferred-chain", dest="preferred_chain",
default=flag_default("preferred_chain"), help=config_help("preferred_chain")
)
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma delimited list of the preferred challenge to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
' www.example.com"')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
# Deprecated arguments
helpful.add_deprecated_argument("--os-packages-only", 0)
helpful.add_deprecated_argument("--no-self-upgrade", 0)
helpful.add_deprecated_argument("--no-bootstrap", 0)
helpful.add_deprecated_argument("--no-permissions-check", 0)
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
|
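A hedged sketch of how this parser entry point is typically driven; the plugin-registry import path is an assumption based on certbot's internal layout and may differ between releases:

import sys

from certbot._internal.plugins import disco as plugins_disco  # assumed path

plugins = plugins_disco.PluginsRegistry.find_all()
config = prepare_and_parse_args(plugins, sys.argv[1:])
print(config.domains, config.dry_run)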
24,711 |
def parameter_dict_from_yaml_file(
parameter_file: str,
use_wildcard: bool = False,
target_nodes: Optional[List[str]] = None,
namespace: str = ''
) -> Dict[str, ParameterMsg]:
"""
Build a dict of parameters from a YAML file formatted as per ``ros2 param dump``.
Will load all parameters if ``target_nodes`` is None
:param parameter_file: Path to the YAML file to load parameters from.
:param use_wildcard: Use wildcard matching for the target nodes.
:param target_nodes: List of nodes in the YAML file to load parameters from.
:param namespace: Namespace to prepend to all parameters.
:return: A dict of Parameter objects keyed by the parameter names
"""
with open(parameter_file, 'r') as f:
param_file = yaml.safe_load(f)
param_keys = []
param_dict = {}
if use_wildcard and '/**' in param_file:
param_keys.append('/**')
if target_nodes:
for n in target_nodes:
if n not in param_file.keys():
raise RuntimeError(f'Param file does not contain parameters for {n},'
f'only for nodes: {list(param_file.keys())} ')
param_keys.append(n)
else:
# wildcard key must go to the front of param_keys so that
# node-namespaced parameters will override the wildcard parameters
keys = set(param_file.keys())
keys.discard('/**')
param_keys.extend(keys)
if len(param_keys) == 0:
raise RuntimeError('Param file does not contain selected parameters')
for n in param_keys:
value = param_file[n]
if type(value) != dict or 'ros__parameters' not in value:
raise RuntimeError('Invalid structure of parameter file for node {}'
'expected same format as provided by ros2 param dump'
.format(n))
param_dict.update(value['ros__parameters'])
return _unpack_parameter_dict(namespace, param_dict)
|
def parameter_dict_from_yaml_file(
parameter_file: str,
use_wildcard: bool = False,
target_nodes: Optional[List[str]] = None,
namespace: str = ''
) -> Dict[str, ParameterMsg]:
"""
Build a dict of parameters from a YAML file formatted as per ``ros2 param dump``.
Will load all parameters if ``target_nodes`` is None
:param parameter_file: Path to the YAML file to load parameters from.
:param use_wildcard: Use wildcard matching for the target nodes.
:param target_nodes: List of nodes in the YAML file to load parameters from.
:param namespace: Namespace to prepend to all parameters.
:return: A dict of Parameter messages keyed by the parameter names
"""
with open(parameter_file, 'r') as f:
param_file = yaml.safe_load(f)
param_keys = []
param_dict = {}
if use_wildcard and '/**' in param_file:
param_keys.append('/**')
if target_nodes:
for n in target_nodes:
if n not in param_file.keys():
raise RuntimeError(f'Param file does not contain parameters for {n},'
f'only for nodes: {list(param_file.keys())} ')
param_keys.append(n)
else:
# wildcard key must go to the front of param_keys so that
# node-namespaced parameters will override the wildcard parameters
keys = set(param_file.keys())
keys.discard('/**')
param_keys.extend(keys)
if len(param_keys) == 0:
raise RuntimeError('Param file does not contain selected parameters')
for n in param_keys:
value = param_file[n]
if type(value) != dict or 'ros__parameters' not in value:
raise RuntimeError('Invalid structure of parameter file for node {}'
'expected same format as provided by ros2 param dump'
.format(n))
param_dict.update(value['ros__parameters'])
return _unpack_parameter_dict(namespace, param_dict)
|
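To make the expected file layout concrete, a sketch using a ros2 param dump-style YAML file (the node name and parameter values are made up for illustration):

import tempfile

yaml_text = """\
/talker:
  ros__parameters:
    use_sim_time: false
    publish_rate: 10.0
/**:
  ros__parameters:
    log_level: info
"""

with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
    f.write(yaml_text)
    path = f.name

# Wildcard values are applied first, so node-specific entries win on conflicts.
params = parameter_dict_from_yaml_file(path, use_wildcard=True,
                                        target_nodes=["/talker"])
print(sorted(params))  # parameter names mapped to Parameter messages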
45,205 |
def file_open(file_path, mode="rb", kwargs=None):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
try:
return s3fs.open(file_path)
except NoCredentialsError:
s3fs = S3FS.S3FileSystem(anon=True)
return s3fs.open(file_path)
        elif kwargs is not None and "compression" in kwargs:
if kwargs["compression"] == "gzip":
import gzip
return gzip.open(file_path, mode=mode)
return open(file_path, mode=mode)
|
def file_open(file_path, mode="rb", kwargs=None):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
try:
return s3fs.open(file_path)
except NoCredentialsError:
s3fs = S3FS.S3FileSystem(anon=True)
return s3fs.open(file_path)
        elif kwargs is not None and "compression" in kwargs:
if kwargs["compression"] == "gzip":
import gzip
return gzip.open(file_path, mode=mode)
return open(file_path, mode=mode)
|
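A short usage sketch; the file names are placeholders, and the kwargs dict is passed explicitly since the helper indexes it directly:

# Local gzip-compressed file: routed through gzip.open by the branch above.
f = file_open("data.csv.gz", mode="rb", kwargs={"compression": "gzip"})
header = f.readline()
f.close()

# Plain local file with no compression hint: falls through to built-in open().
with file_open("data.csv", mode="rb", kwargs={}) as f:
    first_line = f.readline()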
17,444 |
def open_dataset(
filename_or_obj,
*args,
engine=None,
chunks=None,
cache=None,
decode_cf=None,
mask_and_scale=None,
decode_times=None,
decode_timedelta=None,
use_cftime=None,
concat_characters=None,
decode_coords=None,
drop_variables=None,
backend_kwargs=None,
**kwargs,
):
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr"} or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
can also be used.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks=-1`` loads the dataset with dask using a single
chunk for all arrays. `chunks={}`` loads the dataset with dask using
engine preferred chunks if exposed by the backend, otherwise with
a single chunk for all arrays.
``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks. See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables: str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str,supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': (Deprecated) resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"pynio", "pseudonetcdf", "cfgrib".
See engine open function for kwargs accepted by each specific engine.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
if len(args) > 0:
raise TypeError(
"open_dataset() takes only 1 positional argument starting from version 0.18.0, "
"all other options must be passed as keyword arguments"
)
# TODO remove after v0.19
if kwargs.pop("lock", None):
warnings.warn(
"The kwarg 'lock' has been deprecated, and is now"
"ignored. In future (from v0.19) passing lock will "
"raise an error.",
DeprecationWarning,
)
if cache is None:
cache = chunks is None
if backend_kwargs is not None:
kwargs.update(backend_kwargs)
if engine is None:
engine = plugins.guess_engine(filename_or_obj)
backend = plugins.get_backend(engine)
decoders = _resolve_decoders_kwargs(
decode_cf,
open_backend_dataset_parameters=backend.open_dataset_parameters,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
concat_characters=concat_characters,
use_cftime=use_cftime,
decode_coords=decode_coords,
)
overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
backend_ds = backend.open_dataset(
filename_or_obj,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
ds = _dataset_from_backend_dataset(
backend_ds,
filename_or_obj,
engine,
chunks,
cache,
overwrite_encoded_chunks,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
return ds
|
def open_dataset(
filename_or_obj,
*args,
engine=None,
chunks=None,
cache=None,
decode_cf=None,
mask_and_scale=None,
decode_times=None,
decode_timedelta=None,
use_cftime=None,
concat_characters=None,
decode_coords=None,
drop_variables=None,
backend_kwargs=None,
**kwargs,
):
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr"} or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
can also be used.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks=-1`` loads the dataset with dask using a single
chunk for all arrays. `chunks={}`` loads the dataset with dask using
engine preferred chunks if exposed by the backend, otherwise with
a single chunk for all arrays.
``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks. See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables: str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
        a str, supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': (Deprecated) resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"pynio", "pseudonetcdf", "cfgrib".
See engine open function for kwargs accepted by each specific engine.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
if len(args) > 0:
raise TypeError(
"open_dataset() takes only 1 positional argument starting from version 0.18.0, "
"all other options must be passed as keyword arguments"
)
# TODO remove after v0.19
if kwargs.pop("lock", None):
warnings.warn(
"The kwarg 'lock' has been deprecated, and is now "
"ignored. In future (from v0.19) passing lock will "
"raise an error.",
DeprecationWarning,
)
if cache is None:
cache = chunks is None
if backend_kwargs is not None:
kwargs.update(backend_kwargs)
if engine is None:
engine = plugins.guess_engine(filename_or_obj)
backend = plugins.get_backend(engine)
decoders = _resolve_decoders_kwargs(
decode_cf,
open_backend_dataset_parameters=backend.open_dataset_parameters,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
concat_characters=concat_characters,
use_cftime=use_cftime,
decode_coords=decode_coords,
)
overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
backend_ds = backend.open_dataset(
filename_or_obj,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
ds = _dataset_from_backend_dataset(
backend_ds,
filename_or_obj,
engine,
chunks,
cache,
overwrite_encoded_chunks,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
return ds
|
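A minimal usage sketch of the keyword-only calling convention enforced above. The file path, chunk sizes and dropped variable are hypothetical, and the chunked read assumes dask and netCDF4 are installed.

import xarray as xr

# Hypothetical file; any netCDF source readable by the chosen engine would do.
path = "example_data.nc"

# Only the filename may be positional (enforced from v0.18.0); the rest is keyword-only.
ds = xr.open_dataset(
    path,
    engine="netcdf4",           # explicit backend instead of plugins.guess_engine
    chunks={"time": 100},       # lazy dask arrays, 100 time steps per chunk
    decode_times=True,
    drop_variables=["status"],  # hypothetical variable to skip while parsing
)
print(ds)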
29,654 |
def test_recursive_to_dict():
class C:
def __init__(self, x):
self.x = x
def __repr__(self):
return "<C>"
def _to_dict(self, *, exclude):
assert exclude == ["foo"]
return ["C:", recursive_to_dict(self.x, exclude=exclude)]
class D:
def __repr__(self):
return "<D>"
inp = [
1,
1.1,
True,
False,
None,
"foo",
b"bar",
C,
C(1),
D(),
(1, 2),
[3, 4],
{5, 6},
frozenset([7, 8]),
deque([9, 10]),
]
expect = [
1,
1.1,
True,
False,
None,
"foo",
"b'bar'",
"<class 'test_utils.test_recursive_to_dict.<locals>.C'>",
["C:", 1],
"<D>",
[1, 2],
[3, 4],
list({5, 6}),
list(frozenset([7, 8])),
[9, 10],
]
assert recursive_to_dict(inp, exclude=["foo"]) == expect
# Test recursion
a = []
c = C(a)
a += [c, c]
# The blacklist of already-seen objects is reentrant: a is converted to string when
# found inside itself; c must *not* be converted to string the second time it's
# found, because it's outside of itself.
assert recursive_to_dict(a, exclude=["foo"]) == [
["C:", "[<C>, <C>]"],
["C:", "[<C>, <C>]"],
]
|
def test_recursive_to_dict():
class C:
def __init__(self, x):
self.x = x
def __repr__(self):
return "<C>"
def _to_dict(self, *, exclude):
assert exclude == ["foo"]
return ["C:", recursive_to_dict(self.x, exclude=exclude)]
class D:
def __repr__(self):
return "<D>"
inp = [
1,
1.1,
True,
False,
None,
"foo",
b"bar",
C,
C(1),
D(),
(1, 2),
[3, 4],
{5, 6},
frozenset([7, 8]),
deque([9, 10]),
]
expect = [
1,
1.1,
True,
False,
None,
"foo",
"b'bar'",
"<class 'test_utils.test_recursive_to_dict.<locals>.C'>",
["C:", 1],
"<D>",
[1, 2],
[3, 4],
list({5, 6}),
list(frozenset([7, 8])),
[9, 10],
]
assert recursive_to_dict(inp, exclude=["foo"]) == expect
# Test recursion
a = []
c = C(a)
a += [c, c]
# The blocklist of already-seen objects is reentrant: a is converted to string when
# found inside itself; c must *not* be converted to string the second time it's
# found, because it's outside of itself.
assert recursive_to_dict(a, exclude=["foo"]) == [
["C:", "[<C>, <C>]"],
["C:", "[<C>, <C>]"],
]
|
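Both test versions above assume the semantics of recursive_to_dict: scalars pass through, classes and bytes become their repr, objects exposing _to_dict delegate to it, containers recurse, everything else is stringified, and a reentrant seen-set breaks cycles without stringifying repeated siblings. A rough, hypothetical re-implementation of those semantics (not distributed's actual code):

from collections import deque
from contextvars import ContextVar

_seen: ContextVar = ContextVar("_seen", default=None)

def recursive_to_dict(obj, *, exclude=()):
    # Hypothetical sketch for illustration only.
    if obj is None or isinstance(obj, (int, float, bool, str)):
        return obj
    if isinstance(obj, (type, bytes)):
        return repr(obj)              # classes and raw bytes become their repr
    seen = _seen.get()
    token = None
    if seen is None:
        seen = set()
        token = _seen.set(seen)       # share the seen-set with nested _to_dict calls
    try:
        obj_id = id(obj)
        if obj_id in seen:
            return repr(obj)          # cycle: fall back to the string form
        seen.add(obj_id)
        try:
            if hasattr(obj, "_to_dict"):
                return obj._to_dict(exclude=exclude)
            if isinstance(obj, dict):
                return {recursive_to_dict(k, exclude=exclude):
                        recursive_to_dict(v, exclude=exclude) for k, v in obj.items()}
            if isinstance(obj, (list, tuple, set, frozenset, deque)):
                return [recursive_to_dict(o, exclude=exclude) for o in obj]
            return repr(obj)          # arbitrary instances
        finally:
            seen.discard(obj_id)      # reentrant: the same object may appear again later
    finally:
        if token is not None:
            _seen.reset(token)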
30,433 |
def check_ip_command(ip, days=MAX_AGE, verbose=VERBOSE, threshold=THRESHOLD):
ip_list = argToList(ip)
entry_list = []
for corrent_ip in ip_list:
params = {
"ipAddress": corrent_ip,
"maxAgeInDays": days
}
if verbose:
params['verbose'] = "verbose"
analysis = http_request("GET", url_suffix=CHECK_CMD, params=params).get("data")
entry_list.append(analysis_to_entry(analysis, verbose=verbose, threshold=threshold))
return entry_list
|
def check_ip_command(ip, days=MAX_AGE, verbose=VERBOSE, threshold=THRESHOLD):
ip_list = argToList(ip)
entry_list = []
    for current_ip in ip_list:
params = {
"ipAddress": current_ip,
"maxAgeInDays": days
}
if verbose:
params['verbose'] = "verbose"
analysis = http_request("GET", url_suffix=CHECK_CMD, params=params).get("data")
entry_list.append(analysis_to_entry(analysis, verbose=verbose, threshold=threshold))
return entry_list
|
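In the loop above, the loop variable and the "ipAddress" value must refer to the same name. argToList itself (a platform helper not shown in this entry) is assumed to accept either a list or a comma-separated string, so a rough, hypothetical stand-in and an equivalent loop look like this:

def arg_to_list(arg):
    # Hypothetical stand-in for argToList, shown only to make the loop self-contained.
    if arg is None:
        return []
    if isinstance(arg, list):
        return arg
    return [part.strip() for part in str(arg).split(",") if part.strip()]

for current_ip in arg_to_list("1.2.3.4, 5.6.7.8"):
    params = {"ipAddress": current_ip, "maxAgeInDays": 30}
    print(params)   # one request payload per IP, matching the loop above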
43,050 |
def reduced_density_matrix(system, modes, state_is_pure, batched=False):
"""
Trace out all subsystems except the ones in the 'modes' from 'system'. 'modes' can be either an int or a list.
This operation always returns a mixed state, since we do not know in advance if a mode is entangled with others.
"""
if isinstance(modes, int):
modes = [modes]
if state_is_pure:
reduced_state = mixed(system, batched)
else:
reduced_state = system
num_indices = len(reduced_state.shape)
if batched:
batch_offset = 1
else:
batch_offset = 0
num_modes = (num_indices - batch_offset) // 2 # always mixed
removed_cnt = 0
for m in range(num_modes):
if m not in modes:
reduced_state = partial_trace(reduced_state, m - removed_cnt, False, batched)
removed_cnt += 1
return reduced_state
|
def reduced_density_matrix(system, modes, state_is_pure, batched=False):
"""
Trace out all subsystems except those specified in ``modes`` from ``system``. ``modes`` can be either an int or a list.
This operation always returns a mixed state, since we do not know in advance if a mode is entangled with others.
"""
if isinstance(modes, int):
modes = [modes]
if state_is_pure:
reduced_state = mixed(system, batched)
else:
reduced_state = system
num_indices = len(reduced_state.shape)
if batched:
batch_offset = 1
else:
batch_offset = 0
num_modes = (num_indices - batch_offset) // 2 # always mixed
removed_cnt = 0
for m in range(num_modes):
if m not in modes:
reduced_state = partial_trace(reduced_state, m - removed_cnt, False, batched)
removed_cnt += 1
return reduced_state
|
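The mixed and partial_trace helpers above are backend-specific, but the underlying operation is an ordinary partial trace. A small self-contained NumPy illustration with a hypothetical two-mode state (a Bell state, whose single-mode reduction is maximally mixed):

import numpy as np

# Two qubit-sized modes; the global state is pure, each single mode is mixed.
psi = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2)    # (|00> + |11>) / sqrt(2)
rho = np.outer(psi, psi.conj())                      # 4x4 density matrix
rho = rho.reshape(2, 2, 2, 2)                        # indices (i0, i1, j0, j1)

# "Trace out all subsystems except mode 0": contract the mode-1 row/column indices.
rho_mode0 = np.einsum("ikjk->ij", rho)
print(rho_mode0)                                     # 0.5 * identity -> mixed, as expected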
28,573 |
def plot_joint(
data,
group="posterior",
var_names=None,
filter_vars=None,
transform=None,
coords=None,
figsize=None,
textsize=None,
kind="scatter",
gridsize="auto",
contour=True,
fill_last=True,
joint_kwargs=None,
marginal_kwargs=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot a scatter or hexbin of two variables with their respective marginals distributions.
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
group: str, optional
Specifies which :class:`arviz.InferenceData` group should be plotted. Defaults to "posterior".
var_names: str or iterable of str
Variables to be plotted. Iterable of two variables or one variable (with subset
having exactly 2 dimensions) are required. Prefix the variables by ``~`` when you
want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function)
coords: mapping, optional
Coordinates of var_names to be plotted, passed to :func:`xarray:xarray.Dataset.sel`
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
kind: str
Type of plot to display ("scatter", "kde" or "hexbin")
gridsize: int or (int, int), optional.
The number of hexagons in the x-direction. Ignored when hexbin is False. See :func:`matplotlib.pyplot.hexbin`
for details
contour: bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last: bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
joint_kwargs: dicts, optional
Additional keywords modifying the join distribution (central subplot)
marginal_kwargs: dicts, optional
Additional keywords modifying the marginals distributions (top and right subplot)
ax: tuple of axes, optional
Tuple containing (ax_joint, ax_hist_x, ax_hist_y). If None, a new figure and axes
will be created. Matplotlib axes or bokeh figures.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Defaults to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.figure` or
:func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
ax_joint: joint (central) distribution
ax_hist_x: x (top) distribution
ax_hist_y: y (right) distribution
See Also
--------
plot_pair : Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Examples
--------
Scatter Joint plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('non_centered_eight')
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='scatter',
>>> figsize=(6, 6))
Hexbin Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='hexbin',
>>> figsize=(6, 6))
KDE Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='kde',
>>> figsize=(6, 6))
Overlaid plots:
.. plot::
:context: close-figs
>>> data2 = az.load_arviz_data("centered_eight")
>>> kde_kwargs = {"contourf_kwargs": {"alpha": 0}, "contour_kwargs": {"colors": "k"}}
>>> ax = az.plot_joint(
... data, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "k"}
... )
>>> kde_kwargs["contour_kwargs"]["colors"] = "r"
>>> az.plot_joint(
... data2, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "r"}, ax=ax
... )
"""
warnings.warn("plot_joint will be deprecated. Please use plot_pair instead.")
valid_kinds = ["scatter", "kde", "hexbin"]
if kind not in valid_kinds:
raise ValueError(
("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
)
data = convert_to_dataset(data, group=group)
if transform is not None:
data = transform(data)
if coords is None:
coords = {}
var_names = _var_names(var_names, data, filter_vars)
plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))
if len(plotters) != 2:
raise Exception(f"Number of variables to be plotted must 2 (you supplied {len(plotters)})")
plot_joint_kwargs = dict(
ax=ax,
figsize=figsize,
plotters=plotters,
kind=kind,
contour=contour,
fill_last=fill_last,
joint_kwargs=joint_kwargs,
gridsize=gridsize,
textsize=textsize,
marginal_kwargs=marginal_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_joint", "jointplot", backend)
axes = plot(**plot_joint_kwargs)
return axes
|
def plot_joint(
data,
group="posterior",
var_names=None,
filter_vars=None,
transform=None,
coords=None,
figsize=None,
textsize=None,
kind="scatter",
gridsize="auto",
contour=True,
fill_last=True,
joint_kwargs=None,
marginal_kwargs=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot a scatter or hexbin of two variables with their respective marginals distributions.
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
group: str, optional
Specifies which :class:`arviz.InferenceData` group should be plotted. Defaults to "posterior".
var_names: str or iterable of str
Variables to be plotted. Iterable of two variables or one variable (with subset
having exactly 2 dimensions) are required. Prefix the variables by ``~`` when you
want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function)
coords: mapping, optional
Coordinates of var_names to be plotted, passed to :meth:`xarray.Dataset.sel`
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
kind: str
Type of plot to display ("scatter", "kde" or "hexbin")
gridsize: int or (int, int), optional.
The number of hexagons in the x-direction. Ignored when hexbin is False. See :func:`matplotlib.pyplot.hexbin`
for details
contour: bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last: bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
joint_kwargs: dicts, optional
        Additional keywords modifying the joint distribution (central subplot)
marginal_kwargs: dicts, optional
Additional keywords modifying the marginals distributions (top and right subplot)
ax: tuple of axes, optional
Tuple containing (ax_joint, ax_hist_x, ax_hist_y). If None, a new figure and axes
will be created. Matplotlib axes or bokeh figures.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Defaults to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.figure` or
:func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
ax_joint: joint (central) distribution
ax_hist_x: x (top) distribution
ax_hist_y: y (right) distribution
See Also
--------
plot_pair : Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Examples
--------
Scatter Joint plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('non_centered_eight')
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='scatter',
>>> figsize=(6, 6))
Hexbin Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='hexbin',
>>> figsize=(6, 6))
KDE Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='kde',
>>> figsize=(6, 6))
Overlaid plots:
.. plot::
:context: close-figs
>>> data2 = az.load_arviz_data("centered_eight")
>>> kde_kwargs = {"contourf_kwargs": {"alpha": 0}, "contour_kwargs": {"colors": "k"}}
>>> ax = az.plot_joint(
... data, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "k"}
... )
>>> kde_kwargs["contour_kwargs"]["colors"] = "r"
>>> az.plot_joint(
... data2, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "r"}, ax=ax
... )
"""
warnings.warn("plot_joint will be deprecated. Please use plot_pair instead.")
valid_kinds = ["scatter", "kde", "hexbin"]
if kind not in valid_kinds:
raise ValueError(
("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
)
data = convert_to_dataset(data, group=group)
if transform is not None:
data = transform(data)
if coords is None:
coords = {}
var_names = _var_names(var_names, data, filter_vars)
plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))
if len(plotters) != 2:
raise Exception(f"Number of variables to be plotted must 2 (you supplied {len(plotters)})")
plot_joint_kwargs = dict(
ax=ax,
figsize=figsize,
plotters=plotters,
kind=kind,
contour=contour,
fill_last=fill_last,
joint_kwargs=joint_kwargs,
gridsize=gridsize,
textsize=textsize,
marginal_kwargs=marginal_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_joint", "jointplot", backend)
axes = plot(**plot_joint_kwargs)
return axes
|
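Because both versions only warn about the upcoming deprecation, here is a rough sketch of the replacement the warning points to, using plot_pair with marginals on the same example dataset (exact keyword support may vary by ArviZ version):

import arviz as az

data = az.load_arviz_data("non_centered_eight")
# Approximate plot_pair equivalent of the scatter joint plot above.
az.plot_pair(
    data,
    var_names=["theta"],
    coords={"school": ["Choate", "Phillips Andover"]},
    kind="scatter",
    marginals=True,      # draws the top/right marginal distributions
    figsize=(6, 6),
)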
52,054 |
def parition_errors(result_iterable: T.Sequence) -> (T.Generator, T.Generator):
return partition(lambda r: isinstance(r, tuple), result_iterable)
|
def collate_results(result_iterable: T.Sequence) -> (T.Generator, T.Generator):
"""Partition results into separate sequences of successes and errors."""
return partition(lambda r: isinstance(r, tuple), result_iterable)
|
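Both names rely on a partition helper that is not shown in this entry; the usual itertools recipe for such a helper, written here as an assumption about its behaviour (yielding the matching results first, then the rest):

from itertools import filterfalse, tee
import typing as T

def partition(pred: T.Callable, iterable: T.Iterable) -> T.Tuple[T.Iterator, T.Iterator]:
    # Lazily split one iterable into (matching, non-matching) iterators.
    left, right = tee(iterable)
    return filter(pred, left), filterfalse(pred, right)

results = [("job-1", ValueError("bad input")), "job-2 ok", ("job-3", KeyError("missing"))]
errors, successes = partition(lambda r: isinstance(r, tuple), results)
print(list(errors))      # the (id, exception) tuples
print(list(successes))   # the plain success records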
27,331 |
def test_array_unique_int_1d():
universe=u()
assert unique_int_1d(universe.atoms[100:130].resindices) == [5,6,7]
|
def test_array_unique_int_1d():
universe=u()
assert_equal(unique_int_1d(universe.atoms[100:130].resindices), [5,6,7])
|
40,481 |
def dropout_node(edge_index: Tensor, edge_attr: OptTensor = None,
p: float = 0.5, num_nodes: Optional[int] = None,
training: bool = True) -> Tuple[Tensor, OptTensor]:
r"""Randomly drops nodes from the adjacency matrix
:obj:`(edge_index, edge_attr)` with probability :obj:`p` using samples from
a Bernoulli distribution.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
p (float, optional): Dropout probability. (default: :obj:`0.5`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
Examples:
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6])
>>> dropout_node(edge_index, edge_attr)
(tensor([[2, 3],
[3, 2]]),
tensor([5, 6]))
"""
if p < 0. or p > 1.:
raise ValueError(f'Dropout probability has to be between 0 and 1 '
f'(got {p}')
if not training or p == 0.0:
return edge_index, edge_attr
num_nodes = maybe_num_nodes(edge_index, num_nodes)
nodes = torch.arange(num_nodes, dtype=torch.long, device=edge_index.device)
mask = torch.full_like(nodes, 1 - p, dtype=torch.float32)
mask = torch.bernoulli(mask).to(torch.bool)
subset = nodes[mask]
return subgraph(subset, edge_index, edge_attr, num_nodes=num_nodes)
|
def dropout_node(edge_index: Tensor, edge_attr: OptTensor = None,
p: float = 0.5, num_nodes: Optional[int] = None,
training: bool = True) -> Tuple[Tensor, OptTensor]:
r"""Randomly drops nodes from the adjacency matrix
:obj:`(edge_index, edge_attr)` with probability :obj:`p` using samples from
a Bernoulli distribution.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
p (float, optional): Dropout probability. (default: :obj:`0.5`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
Examples:
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6])
>>> dropout_node(edge_index, edge_attr)
(tensor([[2, 3],
[3, 2]]),
tensor([5, 6]))
"""
if p < 0. or p > 1.:
raise ValueError(f'Dropout probability has to be between 0 and 1 '
                         f'(got {p})')
if not training or p == 0.0:
return edge_index, edge_attr
num_nodes = maybe_num_nodes(edge_index, num_nodes)
nodes = torch.arange(num_nodes, dtype=torch.long, device=edge_index.device)
mask = torch.full_like(nodes, 1 - p, dtype=torch.float32)
mask = torch.bernoulli(mask).to(torch.bool)
return subgraph(mask, edge_index, edge_attr, num_nodes=num_nodes)
|
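The only behavioural difference between the two versions is that subgraph now receives the boolean node mask directly rather than the index tensor nodes[mask]. Both describe the same node selection; the edge filtering that subgraph performs can be sketched with plain tensor indexing (assumption: an edge survives only if both endpoints are kept):

import torch

edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])
mask = torch.tensor([False, False, True, True])    # keep nodes 2 and 3
subset = mask.nonzero(as_tuple=False).view(-1)     # tensor([2, 3]) -- the old call style

# Keep an edge only when both of its endpoints survive the node dropout.
edge_mask = mask[edge_index[0]] & mask[edge_index[1]]
print(edge_index[:, edge_mask])                    # tensor([[2, 3], [3, 2]])
print(torch.equal(subset, torch.tensor([2, 3])))   # True: same selection either way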
49,604 |
def test_full_detects_da_dtype():
x = da.from_array(100)
# This shall not raise an NotImplementedError due to dtype detected as object.
a = da.full(shape=(3, 3), fill_value=x)
assert a.dtype == x.dtype
assert (a.compute() == 100).all()
|
def test_full_detects_da_dtype():
x = da.from_array(100)
with pytest.warns(FutureWarning, match="not implemented by Dask array") as record:
        # This shall not raise a NotImplementedError due to dtype detected as object.
a = da.full(shape=(3, 3), fill_value=x)
assert a.dtype == x.dtype
assert_eq(a, np.full(shape=(3, 3), fill_value=100))
assert len(record) == 2
|
36,718 |
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "posonlyargcount",
"kwonlyargcount", "names", "varnames",
"cellvars", "freevars", "nlocals", "flags"]:
print("%s: %s" % (attr, getattr(co, "co_" + attr)))
print("lnotab:", repr(co.co_lnotab))
print("consts:", tuple(consts(co.co_consts)))
|
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "posonlyargcount",
"kwonlyargcount", "names", "varnames",
"cellvars", "freevars", "nlocals", "flags"]:
print("%s: %s" % (attr, getattr(co, "co_" + attr)))
print("lnotab:", list(co.co_lnotab))
print("consts:", tuple(consts(co.co_consts)))
|
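A usage sketch for dump. The consts helper it calls is not shown in this entry, so a trivial stand-in is assumed here; co_lnotab still exists on current CPython code objects, though newer versions favour co_lines().

def consts(values):
    # Hypothetical stand-in for the helper referenced above: just echo each constant.
    return (repr(v) for v in values)

def dump(co):
    """Print out a text representation of a code object."""
    for attr in ["name", "argcount", "posonlyargcount",
                 "kwonlyargcount", "names", "varnames",
                 "cellvars", "freevars", "nlocals", "flags"]:
        print("%s: %s" % (attr, getattr(co, "co_" + attr)))
    print("lnotab:", list(co.co_lnotab))
    print("consts:", tuple(consts(co.co_consts)))

def sample(a, b=2):
    return a + b

dump(sample.__code__)   # inspect the compiled function's code object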
34,171 |
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.cli.arguments.train as core_cli
train_parser = subparsers.add_parser(
"train",
help="Train a Rasa model using your NLU data and stories.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
arguments.train.set_train_arguments(train_parser)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Train a Rasa Core model using your stories.",
)
train_core_parser.set_defaults(func=train_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Train a Rasa NLU model using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_nlu)
train_parser.set_defaults(func=train)
arguments.train.set_train_core_arguments(train_core_parser)
arguments.train.set_train_nlu_arguments(train_nlu_parser)
|
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.cli.arguments.train as core_cli
train_parser = subparsers.add_parser(
"train",
help="Train a Rasa model using your NLU data and stories.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
arguments.train.set_train_arguments(train_parser)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa Core model using your stories.",
)
train_core_parser.set_defaults(func=train_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Train a Rasa NLU model using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_nlu)
train_parser.set_defaults(func=train)
arguments.train.set_train_core_arguments(train_core_parser)
arguments.train.set_train_nlu_arguments(train_nlu_parser)
|
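Both versions follow the standard argparse subcommand pattern: every (sub)parser stores its handler via set_defaults(func=...) and the caller later dispatches on args.func. A minimal self-contained sketch of that pattern, unrelated to Rasa's real CLI:

import argparse

def train(args):
    print("training the full model")

def train_nlu(args):
    print("training the NLU part only")

parser = argparse.ArgumentParser(prog="toolctl")
subparsers = parser.add_subparsers()

train_parser = subparsers.add_parser("train", help="Train a model.")
train_parser.set_defaults(func=train)

train_sub = train_parser.add_subparsers()
nlu_parser = train_sub.add_parser("nlu", help="Train only the NLU component.")
nlu_parser.set_defaults(func=train_nlu)

args = parser.parse_args(["train", "nlu"])
args.func(args)          # dispatches to train_nlu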
35,510 |
def get_gnss_measurements(log_reader):
gnss_measurements = []
for msg in log_reader:
if msg.which() == "ubloxGnss":
ublox_msg = msg.ubloxGnss
if ublox_msg.which == 'measurementReport':
report = ublox_msg.measurementReport
if len(report.measurements) > 0:
gnss_measurements += [read_raw_ublox(report)]
return gnss_measurements
|
def get_gnss_measurements(log_reader):
gnss_measurements = []
for msg in log_reader:
if msg.which() == "ubloxGnss":
ublox_msg = msg.ubloxGnss
if ublox_msg.which == 'measurementReport':
report = ublox_msg.measurementReport
if len(report.measurements) > 0:
gnss_measurements.append(read_raw_ublox(report))
return gnss_measurements
|
35,290 |
def non_negative_tucker_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-8,
sparsity_coefficients=None, core_sparsity_coefficient=None,
fixed_modes=None, random_state=None,
verbose=False, normalize_factors=False, return_errors=False, exact=False,
algorithm='fista'):
"""
Non-negative Tucker decomposition
Uses HALS to update each factor columnwise and uses
fista or active set algorithm to update the core, see [1]_
Parameters
----------
tensor : ndarray
rank : None, int or int list
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
Default: 1e-8
sparsity_coefficients : array of float (as much as the number of modes)
The sparsity coefficients are used for each factor
If set to None, the algorithm is computed without sparsity
Default: None
core_sparsity_coefficient : array of float. This coefficient imposes sparsity on core
when it is updated with fista.
Default: None
fixed_modes : array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: None
verbose : boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors : boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
exact : If it is True, the HALS nnls subroutines give results with high precision but it needs high computational cost.
If it is False, the algorithm gives an approximate solution.
Default: False
algorithm : {'fista', 'active_set'}
Non negative least square solution to update the core.
Default: 'fista'
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
Notes
-----
Tucker decomposes a tensor into a core tensor and list of factors:
.. math::
\\begin{equation}
tensor = [| core; factors[0], ... ,factors[-1] |]
\\end{equation}
We solve the following problem for each factor:
.. math::
\\begin{equation}
\\min_{tensor >= 0} ||tensor_[i] - factors[i]\\times core_[i] \\times (\\prod_{i\\neq j}(factors[j]))^T||^2
\\end{equation}
If we define two variables such as:
.. math::
U = core_[i] \\times (\\prod_{i\\neq j}(factors[j]\\times factors[j]^T)) \\
M = tensor_[i]
Gradient of the problem becomes:
.. math::
\\begin{equation}
\\delta = -U^TM + factors[i] \\times U^TU
\\end{equation}
In order to calculate UTU and UTM, we define two variables:
.. math::
\\begin{equation}
core_cross = \prod_{i\\neq j}(core_[i] \\times (\\prod_{i\\neq j}(factors[j]\\times factors[j]^T)) \\
tensor_cross = \prod_{i\\neq j} tensor_[i] \\times factors_[i]
\\end{equation}
Then UTU and UTM becomes:
.. math::
\\begin{equation}
UTU = core_cross_[j] \\times core_[j]^T \\
UTM = (tensor_cross_[j] \\times \\times core_[j]^T)^T
\\end{equation}
References
----------
.. [1] tl.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
"""
rank = validate_tucker_rank(tl.shape(tensor), rank=rank)
n_modes = tl.ndim(tensor)
if sparsity_coefficients is None or isinstance(sparsity_coefficients, float):
sparsity_coefficients = [sparsity_coefficients] * n_modes
if fixed_modes is None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
nn_core, nn_factors = initialize_tucker(tensor, rank, modes, init=init, svd=svd, random_state=random_state,
non_negative=True)
# initialisation - declare local variables
norm_tensor = tl.norm(tensor, 2)
rec_errors = []
# Iterate over one step of NTD
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes:
# Computing Hadamard of cross-products
pseudo_inverse = nn_factors.copy()
for i, factor in enumerate(nn_factors):
if i != mode:
pseudo_inverse[i] = tl.dot(tl.conj(tl.transpose(factor)), factor)
# UtU
core_cross = multi_mode_dot(nn_core, pseudo_inverse, skip=mode)
UtU = tl.dot(unfold(core_cross, mode), tl.transpose(unfold(nn_core, mode)))
# UtM
tensor_cross = multi_mode_dot(tensor, nn_factors, skip=mode, transpose=True)
MtU = tl.dot(unfold(tensor_cross, mode), tl.transpose(unfold(nn_core, mode)))
UtM = tl.transpose(MtU)
# Call the hals resolution with nnls, optimizing the current mode
nn_factor, _, _, _ = hals_nnls(UtM, UtU, tl.transpose(nn_factors[mode]),
n_iter_max=100, sparsity_coefficient=sparsity_coefficients[mode],
exact=exact)
nn_factors[mode] = tl.transpose(nn_factor)
# updating core
if algorithm == 'fista':
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation = multi_mode_dot(tensor, nn_factors, transpose=True)
learning_rate = 1
for MtM in pseudo_inverse:
learning_rate *= 1 / (tl.partial_svd(MtM)[1][0])
nn_core = fista(core_estimation, pseudo_inverse, x=nn_core, n_iter_max=n_iter_max,
sparsity_coef=core_sparsity_coefficient, lr=learning_rate,)
if algorithm == 'active_set':
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation_vec = tl.base.tensor_to_vec(tl.tenalg.mode_dot(tensor_cross, tl.transpose(nn_factors[modes[-1]]), modes[-1]))
pseudo_inverse_kr = tl.tenalg.kronecker(pseudo_inverse)
vectorcore = active_set_nnls(core_estimation_vec, pseudo_inverse_kr, x=nn_core, n_iter_max=n_iter_max)
nn_core = tl.reshape(vectorcore, tl.shape(nn_core))
# Adding the l1 norm value to the reconstruction error
sparsity_error = 0
for index, sparse in enumerate(sparsity_coefficients):
if sparse:
sparsity_error += 2 * (sparse * tl.norm(nn_factors[index], order=1))
# error computation
rec_error = tl.norm(tensor - tucker_to_tensor((nn_core, nn_factors)), 2) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
tensor = TuckerTensor((nn_core, nn_factors))
if return_errors:
return tensor, rec_errors
else:
return tensor
|
def non_negative_tucker_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-8,
sparsity_coefficients=None, core_sparsity_coefficient=None,
fixed_modes=None, random_state=None,
verbose=False, normalize_factors=False, return_errors=False, exact=False,
algorithm='fista'):
"""
Non-negative Tucker decomposition
Uses HALS to update each factor columnwise and uses
fista or active set algorithm to update the core, see [1]_
Parameters
----------
tensor : ndarray
rank : None, int or int list
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
Default: 1e-8
sparsity_coefficients : array of float (as much as the number of modes)
The sparsity coefficients are used for each factor
If set to None, the algorithm is computed without sparsity
Default: None
core_sparsity_coefficient : array of float. This coefficient imposes sparsity on core
when it is updated with fista.
Default: None
fixed_modes : array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: None
verbose : boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors : boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
exact : If it is True, the HALS nnls subroutines give results with high precision but it needs high computational cost.
If it is False, the algorithm gives an approximate solution.
Default: False
algorithm : {'fista', 'active_set'}
Non negative least square solution to update the core.
Default: 'fista'
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
Notes
-----
Tucker decomposes a tensor into a core tensor and list of factors:
.. math::
\\begin{equation}
tensor = [| core; factors[0], ... ,factors[-1] |]
\\end{equation}
We solve the following problem for each factor:
.. math::
\\begin{equation}
\\min_{tensor >= 0} ||tensor_[i] - factors[i]\\times core_[i] \\times (\\prod_{i\\neq j}(factors[j]))^T||^2
\\end{equation}
If we define two variables such as:
.. math::
U = core_[i] \\times (\\prod_{i\\neq j}(factors[j]\\times factors[j]^T)) \\
M = tensor_[i]
Gradient of the problem becomes:
.. math::
\\begin{equation}
\\delta = -U^TM + factors[i] \\times U^TU
\\end{equation}
In order to calculate UTU and UTM, we define two variables:
.. math::
\\begin{equation}
        core_cross = \\prod_{i\\neq j}(core_[i] \\times (\\prod_{i\\neq j}(factors[j]\\times factors[j]^T))) \\
        tensor_cross = \\prod_{i\\neq j} tensor_[i] \\times factors_[i]
\\end{equation}
Then UTU and UTM becomes:
.. math::
\\begin{equation}
UTU = core_cross_[j] \\times core_[j]^T \\
        UTM = (tensor_cross_[j] \\times core_[j]^T)^T
\\end{equation}
References
----------
.. [1] tl.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
"""
rank = validate_tucker_rank(tl.shape(tensor), rank=rank)
n_modes = tl.ndim(tensor)
if sparsity_coefficients is None or not isinstance(sparsity_coefficients, Iterable):
sparsity_coefficients = [sparsity_coefficients] * n_modes
if fixed_modes is None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
nn_core, nn_factors = initialize_tucker(tensor, rank, modes, init=init, svd=svd, random_state=random_state,
non_negative=True)
# initialisation - declare local variables
norm_tensor = tl.norm(tensor, 2)
rec_errors = []
# Iterate over one step of NTD
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes:
# Computing Hadamard of cross-products
pseudo_inverse = nn_factors.copy()
for i, factor in enumerate(nn_factors):
if i != mode:
pseudo_inverse[i] = tl.dot(tl.conj(tl.transpose(factor)), factor)
# UtU
core_cross = multi_mode_dot(nn_core, pseudo_inverse, skip=mode)
UtU = tl.dot(unfold(core_cross, mode), tl.transpose(unfold(nn_core, mode)))
# UtM
tensor_cross = multi_mode_dot(tensor, nn_factors, skip=mode, transpose=True)
MtU = tl.dot(unfold(tensor_cross, mode), tl.transpose(unfold(nn_core, mode)))
UtM = tl.transpose(MtU)
# Call the hals resolution with nnls, optimizing the current mode
nn_factor, _, _, _ = hals_nnls(UtM, UtU, tl.transpose(nn_factors[mode]),
n_iter_max=100, sparsity_coefficient=sparsity_coefficients[mode],
exact=exact)
nn_factors[mode] = tl.transpose(nn_factor)
# updating core
if algorithm == 'fista':
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation = multi_mode_dot(tensor, nn_factors, transpose=True)
learning_rate = 1
for MtM in pseudo_inverse:
learning_rate *= 1 / (tl.partial_svd(MtM)[1][0])
nn_core = fista(core_estimation, pseudo_inverse, x=nn_core, n_iter_max=n_iter_max,
sparsity_coef=core_sparsity_coefficient, lr=learning_rate,)
if algorithm == 'active_set':
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation_vec = tl.base.tensor_to_vec(tl.tenalg.mode_dot(tensor_cross, tl.transpose(nn_factors[modes[-1]]), modes[-1]))
pseudo_inverse_kr = tl.tenalg.kronecker(pseudo_inverse)
vectorcore = active_set_nnls(core_estimation_vec, pseudo_inverse_kr, x=nn_core, n_iter_max=n_iter_max)
nn_core = tl.reshape(vectorcore, tl.shape(nn_core))
# Adding the l1 norm value to the reconstruction error
sparsity_error = 0
for index, sparse in enumerate(sparsity_coefficients):
if sparse:
sparsity_error += 2 * (sparse * tl.norm(nn_factors[index], order=1))
# error computation
rec_error = tl.norm(tensor - tucker_to_tensor((nn_core, nn_factors)), 2) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
tensor = TuckerTensor((nn_core, nn_factors))
if return_errors:
return tensor, rec_errors
else:
return tensor
|
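A short call sketch against TensorLy's public API; the tensor shape and rank are hypothetical, and the unpacking relies on TuckerTensor behaving like a (core, factors) pair as returned above:

import numpy as np
import tensorly as tl
from tensorly.decomposition import non_negative_tucker_hals

rng = np.random.default_rng(0)
tensor = tl.tensor(rng.random((8, 8, 8)))            # hypothetical non-negative data

tucker, errors = non_negative_tucker_hals(
    tensor, rank=[4, 4, 4], n_iter_max=50, tol=1e-7, return_errors=True
)
core, factors = tucker                                # TuckerTensor = (core, factors)
print(errors[-1], tl.tucker_to_tensor((core, factors)).shape)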
23,589 |
def sapm(aoi, module, upper=None):
r"""
Determine the incidence angle modifier (IAM) using the SAPM model.
Parameters
----------
aoi : numeric
Angle of incidence in degrees. Negative input angles will return
zeros.
module : dict-like
A dict or Series with the SAPM IAM model parameters.
See the :py:func:`sapm` notes section for more details.
upper : None or float, default None
Upper limit on the results.
Returns
-------
iam : numeric
The SAPM angle of incidence loss coefficient, termed F2 in [1].
Notes
-----
The SAPM [1] traditionally does not define an upper limit on the AOI
loss function and values slightly exceeding 1 may exist for moderate
angles of incidence (15-40 degrees). However, users may consider
imposing an upper limit of 1.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
[2] B.H. King et al, "Procedure to Determine Coefficients for the
Sandia Array Performance Model (SAPM)," SAND2016-5284, Sandia
National Laboratories (2016).
[3] B.H. King et al, "Recent Advancements in Outdoor Measurement
Techniques for Angle of Incidence Effects," 42nd IEEE PVSC (2015).
DOI: 10.1109/PVSC.2015.7355849
"""
aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],
module['B1'], module['B0']]
iam = np.polyval(aoi_coeff, aoi)
iam = np.clip(iam, 0, upper)
# nan tolerant masking
aoi_lt_0 = np.full_like(aoi, False, dtype='bool')
np.less(aoi, 0, where=~np.isnan(aoi), out=aoi_lt_0)
iam = np.where(aoi_lt_0, 0, iam)
if isinstance(aoi, pd.Series):
iam = pd.Series(iam, aoi.index)
return iam
|
def sapm(aoi, module, upper=None):
r"""
Determine the incidence angle modifier (IAM) using the SAPM model.
Parameters
----------
aoi : numeric
Angle of incidence in degrees. Negative input angles will return
zeros.
module : dict-like
A dict or Series with the SAPM IAM model parameters.
See the :py:func:`pvlib.pvsystem.sapm` notes section for more details.
upper : None or float, default None
Upper limit on the results.
Returns
-------
iam : numeric
The SAPM angle of incidence loss coefficient, termed F2 in [1].
Notes
-----
The SAPM [1] traditionally does not define an upper limit on the AOI
loss function and values slightly exceeding 1 may exist for moderate
angles of incidence (15-40 degrees). However, users may consider
imposing an upper limit of 1.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
[2] B.H. King et al, "Procedure to Determine Coefficients for the
Sandia Array Performance Model (SAPM)," SAND2016-5284, Sandia
National Laboratories (2016).
[3] B.H. King et al, "Recent Advancements in Outdoor Measurement
Techniques for Angle of Incidence Effects," 42nd IEEE PVSC (2015).
DOI: 10.1109/PVSC.2015.7355849
"""
aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],
module['B1'], module['B0']]
iam = np.polyval(aoi_coeff, aoi)
iam = np.clip(iam, 0, upper)
# nan tolerant masking
aoi_lt_0 = np.full_like(aoi, False, dtype='bool')
np.less(aoi, 0, where=~np.isnan(aoi), out=aoi_lt_0)
iam = np.where(aoi_lt_0, 0, iam)
if isinstance(aoi, pd.Series):
iam = pd.Series(iam, aoi.index)
return iam
|
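A worked example of the F2 polynomial with made-up B0..B5 coefficients, roughly shaped like typical SAPM values (real ones come from the Sandia module database):

import numpy as np
import pandas as pd
from pvlib import iam

# Hypothetical coefficients for F2(aoi) = B5*aoi**5 + ... + B1*aoi + B0.
module = {'B0': 1.0, 'B1': -2.4e-3, 'B2': 3.1e-4, 'B3': -1.2e-5,
          'B4': 1.4e-7, 'B5': -5.2e-10}
aoi = pd.Series([-10.0, 0.0, 30.0, 60.0, np.nan])

print(iam.sapm(aoi, module))             # unbounded above, as with the default upper=None
print(iam.sapm(aoi, module, upper=1))    # optionally cap the modifier at 1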
14,321 |
def buildAnchor(x, y, point=None, deviceX=None, deviceY=None):
"""Builds an Anchor table.
This determines the appropriate anchor format based on the passed parameters.
Args:
x (int): X coordinate.
y (int): Y coordinate.
point (int): Index of glyph contour point, if provided.
deviceX (``otTables.Device``): X coordinate device table, if provided.
deviceY (``otTables.Device``): Y coordinate device table, if provided.
Returns:
An ``otTables.Anchor`` object.
"""
self = ot.Anchor()
self.XCoordinate, self.YCoordinate = x, y
self.Format = 1
if point is not None:
self.AnchorPoint = point
self.Format = 2
if deviceX is not None or deviceY is not None:
assert self.Format == 1, \
"Either point, or both of deviceX/deviceY, must be None."
self.XDeviceTable = deviceX
self.YDeviceTable = deviceY
self.Format = 3
return self
|
def buildAnchor(x, y, point=None, deviceX=None, deviceY=None):
"""Builds an Anchor table.
This determines the appropriate anchor format based on the passed parameters.
Args:
x (int): X coordinate.
y (int): Y coordinate.
point (Optional[int]): Index of glyph contour point, if provided.
deviceX (``otTables.Device``): X coordinate device table, if provided.
deviceY (``otTables.Device``): Y coordinate device table, if provided.
Returns:
An ``otTables.Anchor`` object.
"""
self = ot.Anchor()
self.XCoordinate, self.YCoordinate = x, y
self.Format = 1
if point is not None:
self.AnchorPoint = point
self.Format = 2
if deviceX is not None or deviceY is not None:
assert self.Format == 1, \
"Either point, or both of deviceX/deviceY, must be None."
self.XDeviceTable = deviceX
self.YDeviceTable = deviceY
self.Format = 3
return self
|
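A quick usage sketch showing how the anchor format falls out of the arguments (fontTools' otlLib builder; the device-table case is only described in a comment here):

from fontTools.otlLib.builder import buildAnchor

plain = buildAnchor(120, -20)               # Format 1: bare X/Y coordinates
contour = buildAnchor(120, -20, point=3)    # Format 2: adds a glyph contour point index
# Passing deviceX and/or deviceY (otTables.Device objects) instead of `point`
# would switch the result to Format 3, per the assertion in the builder above.

print(plain.Format, contour.Format)         # 1 2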
29,873 |
def run_combiner(sample_paths: List[str],
out_file: str,
tmp_path: str,
intervals: Optional[List[hl.utils.Interval]] = None,
header: Optional[str] = None,
sample_names: Optional[List[str]] = None,
branch_factor: int = CombinerConfig.default_branch_factor,
batch_size: int = CombinerConfig.default_batch_size,
target_records: int = CombinerConfig.default_target_records,
import_interval_size: Optional[int] = None,
use_genome_default_intervals: bool = False,
use_exome_default_intervals: bool = False,
overwrite: bool = False,
reference_genome: str = 'default',
contig_recoding: Optional[Dict[str, str]] = None,
key_by_locus_and_alleles: bool = False):
"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table.
**Partitioning**
The partitioning of input GVCFs is determined the four parameters below, one of which must be
passed to this function:
- `intervals` -- User-supplied intervals.
- `import_interval_size` -- Use intervals of this uniform size across the genome.
- `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs.
- `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs.
It is recommended that new users include either `use_genome_default_intervals` or
`use_exome_default_intervals`.
Parameters
----------
sample_paths : :obj:`list` of :obj:`str`
Paths to individual GVCFs.
out_file : :obj:`str`
Path to final combined matrix table.
tmp_path : :obj:`str`
Path for intermediate output.
intervals : list of :class:`.Interval` or None
Partitioning with which to import GVCFs in first phase of combiner.
header : :obj:`str` or None
External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well.
sample_names: list of :obj:`str` or None
Sample names, to be used with `header`.
branch_factor : :obj:`int`
Combiner branch factor.
batch_size : :obj:`int`
Combiner batch size.
target_records : :obj:`int`
Target records per partition in each combiner phase after the first.
import_interval_size : :obj:`int` or None
The target interval size to partition the reference into intervals for
importing GVCFs.
use_genome_default_intervals : :obj:`bool`
The input GVCFs are genomes, if this is false, they are assumed to
be exomes. If `import_interval_size` is not None, this parameter is
ignored.
use_exome_default_intervals : :obj:`bool`
The input GVCFs are genomes, if this is false, they are assumed to
be exomes. If `import_interval_size` is not None, this parameter is
ignored.
overwrite : :obj:`bool`
Overwrite output file, if it exists.
reference_genome : :obj:`str`
Reference genome for GVCF import.
contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional
Mapping from contig name in gVCFs to contig name the reference
genome. All contigs must be present in the
`reference_genome`, so this is useful for mapping
differently-formatted data onto known references.
key_by_locus_and_alleles : :obj:`bool`
Key by both locus and alleles in the final output.
Returns
-------
None
"""
tmp_path += f'/combiner-temporary/{uuid.uuid4()}/'
if header is not None:
assert sample_names is not None
assert len(sample_names) == len(sample_paths)
n_partition_args = (int(intervals is not None)
+ int(import_interval_size is not None)
+ int(use_genome_default_intervals)
+ int(use_exome_default_intervals))
if n_partition_args == 0:
raise ValueError("'run_combiner': require one argument from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning")
if n_partition_args > 0:
warning("'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals'."
"\n The argument found first in the list in this warning will be used, and others ignored.")
if intervals is not None:
info(f"Using {len(intervals)} user-supplied intervals as partitioning for GVCF import")
elif import_interval_size is not None:
intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size)
info(f"Using {len(intervals)} intervals with user-supplied size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_genome_default_intervals:
size = CombinerConfig.default_genome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default whole-genome size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_exome_default_intervals:
size = CombinerConfig.default_exome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default exome size"
f" {import_interval_size} as partitioning for GVCF import")
assert intervals is not None
config = CombinerConfig(branch_factor=branch_factor,
batch_size=batch_size,
target_records=target_records)
plan = config.plan(len(sample_paths))
files_to_merge = sample_paths
n_phases = len(plan.phases)
total_ops = len(files_to_merge) * n_phases
total_work_done = 0
for phase_i, phase in enumerate(plan.phases):
phase_i += 1 # used for info messages, 1-indexed for readability
n_jobs = len(phase.jobs)
merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables'
job_str = hl.utils.misc.plural('job', n_jobs)
info(f"Starting phase {phase_i}/{n_phases}, merging {len(files_to_merge)} {merge_str} in {n_jobs} {job_str}.")
if phase_i > 1:
intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(),
config.target_records,
reference_genome=reference_genome)
new_files_to_merge = []
for job_i, job in enumerate(phase.jobs):
job_i += 1 # used for info messages, 1-indexed for readability
n_merges = len(job.merges)
merge_str = hl.utils.misc.plural('file', n_merges)
pct_total = 100 * job.input_total_size / total_ops
info(
f"Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O.")
merge_mts: List[MatrixTable] = []
for merge in job.merges:
inputs = [files_to_merge[i] for i in merge.inputs]
if phase_i == 1:
mts = [transform_gvcf(vcf)
for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False,
_external_header=header,
_external_sample_ids=[sample_names[i] for i in
merge.inputs] if header is not None else None,
reference_genome=reference_genome,
contig_recoding=contig_recoding)]
else:
mts = [hl.read_matrix_table(path, _intervals=intervals) for path in inputs]
merge_mts.append(combine_gvcfs(mts))
if phase_i == n_phases: # final merge!
assert n_jobs == 1
assert len(merge_mts) == 1
[final_mt] = merge_mts
if key_by_locus_and_alleles:
final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True))
final_mt.write(out_file, overwrite=overwrite)
new_files_to_merge = [out_file]
info(f"Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished.")
break
tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/'
hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True)
pad = len(str(len(merge_mts)))
new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts)))
total_work_done += job.input_total_size
info(
f"Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished.")
info(f"Finished phase {phase_i}/{n_phases}.")
files_to_merge = new_files_to_merge
assert files_to_merge == [out_file]
info("Finished!")
|
def run_combiner(sample_paths: List[str],
out_file: str,
tmp_path: str,
intervals: Optional[List[hl.utils.Interval]] = None,
header: Optional[str] = None,
sample_names: Optional[List[str]] = None,
branch_factor: int = CombinerConfig.default_branch_factor,
batch_size: int = CombinerConfig.default_batch_size,
target_records: int = CombinerConfig.default_target_records,
import_interval_size: Optional[int] = None,
use_genome_default_intervals: bool = False,
use_exome_default_intervals: bool = False,
overwrite: bool = False,
reference_genome: str = 'default',
contig_recoding: Optional[Dict[str, str]] = None,
key_by_locus_and_alleles: bool = False):
"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table.
**Partitioning**
The partitioning of input GVCFs is determined the four parameters below, one of which must be
passed to this function:
- `intervals` -- User-supplied intervals.
- `import_interval_size` -- Use intervals of this uniform size across the genome.
- `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs.
- `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs.
It is recommended that new users include either `use_genome_default_intervals` or
`use_exome_default_intervals`.
Parameters
----------
sample_paths : :obj:`list` of :obj:`str`
Paths to individual GVCFs.
out_file : :obj:`str`
Path to final combined matrix table.
tmp_path : :obj:`str`
Path for intermediate output.
intervals : list of :class:`.Interval` or None
Partitioning with which to import GVCFs in first phase of combiner.
header : :obj:`str` or None
External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well.
sample_names: list of :obj:`str` or None
Sample names, to be used with `header`.
branch_factor : :obj:`int`
Combiner branch factor.
batch_size : :obj:`int`
Combiner batch size.
target_records : :obj:`int`
Target records per partition in each combiner phase after the first.
import_interval_size : :obj:`int` or None
The target interval size to partition the reference into intervals for
importing GVCFs.
use_genome_default_intervals : :obj:`bool`
The input GVCFs are genomes, if this is false, they are assumed to
be exomes. If `import_interval_size` is not None, this parameter is
ignored.
use_exome_default_intervals : :obj:`bool`
The input GVCFs are exomes, if this is false, they are assumed to
        be genomes. If `import_interval_size` is not None, this parameter is
ignored.
overwrite : :obj:`bool`
Overwrite output file, if it exists.
reference_genome : :obj:`str`
Reference genome for GVCF import.
contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional
        Mapping from contig name in gVCFs to contig name in the reference
genome. All contigs must be present in the
`reference_genome`, so this is useful for mapping
differently-formatted data onto known references.
key_by_locus_and_alleles : :obj:`bool`
Key by both locus and alleles in the final output.
Returns
-------
None
"""
tmp_path += f'/combiner-temporary/{uuid.uuid4()}/'
if header is not None:
assert sample_names is not None
assert len(sample_names) == len(sample_paths)
n_partition_args = (int(intervals is not None)
+ int(import_interval_size is not None)
+ int(use_genome_default_intervals)
+ int(use_exome_default_intervals))
if n_partition_args == 0:
raise ValueError("'run_combiner': require one argument from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning")
    if n_partition_args > 1:
warning("'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals'."
"\n The argument found first in the list in this warning will be used, and others ignored.")
if intervals is not None:
info(f"Using {len(intervals)} user-supplied intervals as partitioning for GVCF import")
elif import_interval_size is not None:
intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size)
info(f"Using {len(intervals)} intervals with user-supplied size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_genome_default_intervals:
size = CombinerConfig.default_genome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default whole-genome size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_exome_default_intervals:
size = CombinerConfig.default_exome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default exome size"
f" {import_interval_size} as partitioning for GVCF import")
assert intervals is not None
config = CombinerConfig(branch_factor=branch_factor,
batch_size=batch_size,
target_records=target_records)
plan = config.plan(len(sample_paths))
files_to_merge = sample_paths
n_phases = len(plan.phases)
total_ops = len(files_to_merge) * n_phases
total_work_done = 0
for phase_i, phase in enumerate(plan.phases):
phase_i += 1 # used for info messages, 1-indexed for readability
n_jobs = len(phase.jobs)
merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables'
job_str = hl.utils.misc.plural('job', n_jobs)
info(f"Starting phase {phase_i}/{n_phases}, merging {len(files_to_merge)} {merge_str} in {n_jobs} {job_str}.")
if phase_i > 1:
intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(),
config.target_records,
reference_genome=reference_genome)
new_files_to_merge = []
for job_i, job in enumerate(phase.jobs):
job_i += 1 # used for info messages, 1-indexed for readability
n_merges = len(job.merges)
merge_str = hl.utils.misc.plural('file', n_merges)
pct_total = 100 * job.input_total_size / total_ops
info(
f"Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O.")
merge_mts: List[MatrixTable] = []
for merge in job.merges:
inputs = [files_to_merge[i] for i in merge.inputs]
if phase_i == 1:
mts = [transform_gvcf(vcf)
for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False,
_external_header=header,
_external_sample_ids=[sample_names[i] for i in
merge.inputs] if header is not None else None,
reference_genome=reference_genome,
contig_recoding=contig_recoding)]
else:
mts = [hl.read_matrix_table(path, _intervals=intervals) for path in inputs]
merge_mts.append(combine_gvcfs(mts))
if phase_i == n_phases: # final merge!
assert n_jobs == 1
assert len(merge_mts) == 1
[final_mt] = merge_mts
if key_by_locus_and_alleles:
final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True))
final_mt.write(out_file, overwrite=overwrite)
new_files_to_merge = [out_file]
info(f"Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished.")
break
tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/'
hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True)
pad = len(str(len(merge_mts)))
new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts)))
total_work_done += job.input_total_size
info(
f"Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished.")
info(f"Finished phase {phase_i}/{n_phases}.")
files_to_merge = new_files_to_merge
assert files_to_merge == [out_file]
info("Finished!")
|
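The combiner above requires exactly one of the four partitioning arguments plus a temporary path for intermediate matrix tables. A minimal usage sketch, assuming the function is exposed as hl.experimental.run_combiner (as in Hail 0.2) and using hypothetical bucket paths:

import hail as hl

hl.init()

# Hypothetical per-sample GVCF paths and output locations.
sample_paths = [
    'gs://my-bucket/gvcfs/sample1.g.vcf.bgz',
    'gs://my-bucket/gvcfs/sample2.g.vcf.bgz',
    'gs://my-bucket/gvcfs/sample3.g.vcf.bgz',
]

hl.experimental.run_combiner(
    sample_paths,
    out_file='gs://my-bucket/combined.mt',
    tmp_path='gs://my-bucket/tmp',
    use_genome_default_intervals=True,  # exactly one partitioning argument is given
    reference_genome='GRCh38',
    key_by_locus_and_alleles=True,
    overwrite=True,
)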
12,973 |
def resolve_products(
info, user, stock_availability=None, channel_slug=None, **_kwargs
) -> ChannelQsContext:
qs = models.Product.objects.visible_to_user(user, channel_slug)
if stock_availability:
qs = filter_products_by_stock_availability(qs, stock_availability)
if not qs.user_has_access_to_all(user):
qs = qs.annotate_visible_in_listings(channel_slug).exclude(
visible_in_listings=False
)
return ChannelQsContext(qs=qs.distinct(), channel_slug=channel_slug)
|
def resolve_products(
info, requestor, stock_availability=None, channel_slug=None, **_kwargs
) -> ChannelQsContext:
qs = models.Product.objects.visible_to_user(user, channel_slug)
if stock_availability:
qs = filter_products_by_stock_availability(qs, stock_availability)
if not qs.user_has_access_to_all(user):
qs = qs.annotate_visible_in_listings(channel_slug).exclude(
visible_in_listings=False
)
return ChannelQsContext(qs=qs.distinct(), channel_slug=channel_slug)
|
997 |
def exception_colors():
"""Return a color table with fields for exception reporting.
The table is an instance of ColorSchemeTable with schemes added for
'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
in.
Examples:
>>> ec = exception_colors()
>>> ec.active_scheme_name
''
>>> print(ec.active_colors)
None
Now we activate a color scheme:
>>> ec.set_active_scheme('NoColor')
>>> ec.active_scheme_name
'NoColor'
>>> sorted(ec.active_colors.keys())
['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
'val', 'valEm']
"""
ex_colors = ColorSchemeTable()
# Populate it with color schemes
C = TermColors # shorthand and local lookup
ex_colors.add_scheme(ColorScheme(
'NoColor',
# The color to be used for the top line
topline = C.NoColor,
# The colors to be used in the traceback
filename = C.NoColor,
lineno = C.NoColor,
name = C.NoColor,
vName = C.NoColor,
val = C.NoColor,
em = C.NoColor,
# Emphasized colors for the last frame of the traceback
normalEm = C.NoColor,
filenameEm = C.NoColor,
linenoEm = C.NoColor,
nameEm = C.NoColor,
valEm = C.NoColor,
# Colors for printing the exception
excName = C.NoColor,
line = C.NoColor,
caret = C.NoColor,
Normal = C.NoColor
))
# make some schemes as instances so we can copy them for modification easily
ex_colors.add_scheme(ColorScheme(
'Linux',
# The color to be used for the top line
topline = C.LightRed,
# The colors to be used in the traceback
filename = C.Green,
lineno = C.Green,
name = C.Purple,
vName = C.Cyan,
val = C.Green,
em = C.LightCyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.LightCyan,
filenameEm = C.LightGreen,
linenoEm = C.LightGreen,
nameEm = C.LightPurple,
valEm = C.LightBlue,
# Colors for printing the exception
excName = C.LightRed,
line = C.Yellow,
caret = C.White,
Normal = C.Normal,
ExecutingNode = 'bg:#00005f'
))
# For light backgrounds, swap dark/light colors
ex_colors.add_scheme(ColorScheme(
'LightBG',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#005f00'
))
ex_colors.add_scheme(ColorScheme(
'Neutral',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#dddddd'
))
# Hack: the 'neutral' colours are not very visible on a dark background on
# Windows. Since Windows command prompts have a dark background by default, and
# relatively few users are likely to alter that, we will use the 'Linux' colours,
# designed for a dark background, as the default on Windows.
if os.name == "nt":
ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
return ex_colors
|
def exception_colors():
"""Return a color table with fields for exception reporting.
The table is an instance of ColorSchemeTable with schemes added for
'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
in.
Examples:
>>> ec = exception_colors()
>>> ec.active_scheme_name
''
>>> print(ec.active_colors)
None
Now we activate a color scheme:
>>> ec.set_active_scheme('NoColor')
>>> ec.active_scheme_name
'NoColor'
>>> sorted(ec.active_colors.keys())
['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
'val', 'valEm']
"""
ex_colors = ColorSchemeTable()
# Populate it with color schemes
C = TermColors # shorthand and local lookup
ex_colors.add_scheme(ColorScheme(
'NoColor',
# The color to be used for the top line
topline = C.NoColor,
# The colors to be used in the traceback
filename = C.NoColor,
lineno = C.NoColor,
name = C.NoColor,
vName = C.NoColor,
val = C.NoColor,
em = C.NoColor,
# Emphasized colors for the last frame of the traceback
normalEm = C.NoColor,
filenameEm = C.NoColor,
linenoEm = C.NoColor,
nameEm = C.NoColor,
valEm = C.NoColor,
# Colors for printing the exception
excName = C.NoColor,
line = C.NoColor,
caret = C.NoColor,
Normal = C.NoColor
))
# make some schemes as instances so we can copy them for modification easily
ex_colors.add_scheme(ColorScheme(
'Linux',
# The color to be used for the top line
topline = C.LightRed,
# The colors to be used in the traceback
filename = C.Green,
lineno = C.Green,
name = C.Purple,
vName = C.Cyan,
val = C.Green,
em = C.LightCyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.LightCyan,
filenameEm = C.LightGreen,
linenoEm = C.LightGreen,
nameEm = C.LightPurple,
valEm = C.LightBlue,
# Colors for printing the exception
excName = C.LightRed,
line = C.Yellow,
caret = C.White,
Normal = C.Normal,
ExecutingNode = 'bg:#00005f'
))
# For light backgrounds, swap dark/light colors
ex_colors.add_scheme(ColorScheme(
'LightBG',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#005f00'
))
ex_colors.add_scheme(ColorScheme(
'Neutral',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#dddddd',
))
# Hack: the 'neutral' colours are not very visible on a dark background on
# Windows. Since Windows command prompts have a dark background by default, and
# relatively few users are likely to alter that, we will use the 'Linux' colours,
# designed for a dark background, as the default on Windows.
if os.name == "nt":
ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
return ex_colors
|
31,319 |
def get_domain_whois_history(client: Client, **args) -> CommandResults:
domain = args.get("domain")
params = {"page": int(args.get("page", 0))}
uri = f"/history/{domain}/whois"
response = client._http_request("GET", uri, params=params)
records = response["result"].get("items")
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_whois_history",
outputs={"domain_whois_history": records},
readable_output=tableToMarkdown(f"Domain Whois History for {domain}", records),
)
return results
|
def get_domain_whois_history(client: Client, **args) -> CommandResults:
domain = args.get("domain")
params = {"page": int(args.get("page", 0))}
uri = f"/history/{domain}/whois"
response = client._http_request("GET", uri, params=params)
records = response.get("result", {}).get("items")
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_whois_history",
outputs={"domain_whois_history": records},
readable_output=tableToMarkdown(f"Domain Whois History for {domain}", records),
)
return results
|
45,871 |
def distort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Distortion of a set of 2D points based on the lens distortion model.
    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
        Distorted 2D points with shape :math:`(*, N, 2)`.
"""
assert points.dim() >= 2 and points.shape[-1] == 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2]  # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2]  # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN or (N,)
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN or (N,)
# Distort points
r2 = x * x + y * y
rad_poly = (1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3) / (
1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3
)
xd = (
x * rad_poly
+ 2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
yd = (
y * rad_poly
+ dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
tilt = tiltProjection(dist[..., 12], dist[..., 13])
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
pointsUntilt = torch.stack([xd, yd, torch.ones(xd.shape, device=xd.device, dtype=xd.dtype)], -1) @ tilt.transpose(-2, -1)
xd = pointsUntilt[..., 0] / pointsUntilt[..., 2]
yd = pointsUntilt[..., 1] / pointsUntilt[..., 2]
    # Convert points from normalized camera coordinates to pixel coordinates
x = fx * xd + cx
y = fy * yd + cy
return torch.stack([x, y], -1)
|
def distort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Distortion of a set of 2D points based on the lens distortion model.
    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
        Distorted 2D points with shape :math:`(*, N, 2)`.
"""
assert points.dim() >= 2 and points.shape[-1] == 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2]  # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2]  # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN or (N,)
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN or (N,)
# Distort points
r2 = x * x + y * y
rad_poly = (1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3) / (
1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3
)
xd = (
x * rad_poly
+ 2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
yd = (
y * rad_poly
+ dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
tilt = tiltProjection(dist[..., 12], dist[..., 13])
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
points_untilt = torch.stack([xd, yd, torch.ones_like(xd)], -1) @ tilt.transpose(-2, -1)
        xd = points_untilt[..., 0] / points_untilt[..., 2]
        yd = points_untilt[..., 1] / points_untilt[..., 2]
    # Convert points from normalized camera coordinates to pixel coordinates
x = fx * xd + cx
y = fy * yd + cy
return torch.stack([x, y], -1)
|
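As a quick check of the distortion model documented above, the sketch below builds a simple pinhole intrinsic matrix and a radial-only coefficient vector and pushes a few pixel locations through the function. It assumes the distort_points defined above is in scope; the focal length, principal point, and k1 value are arbitrary.

import torch

# Hypothetical pinhole camera: fx = fy = 500, principal point at (320, 240).
K = torch.tensor([[500.0,   0.0, 320.0],
                  [  0.0, 500.0, 240.0],
                  [  0.0,   0.0,   1.0]])

# Radial-only distortion: k1 = 0.1, remaining coefficients zero (4-element form).
dist = torch.tensor([0.1, 0.0, 0.0, 0.0])

# Pixel locations with shape (N, 2).
points = torch.tensor([[320.0, 240.0],   # at the principal point: maps to itself
                       [420.0, 240.0],
                       [320.0, 340.0]])

distorted = distort_points(points, K, dist)
print(distorted)  # off-center points move outward under positive k1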
39,539 |
def valid_sources(sources):
importers = []
unknown_sources = []
for source in sources:
try:
importers.append(importer_mapping[source])
except KeyError:
unknown_sources.append(source)
if unknown_sources:
raise CommandError(f"Unknown sources: {unknown_sources}")
return importers
|
def validate_importers(sources):
importers = []
unknown_sources = []
for source in sources:
try:
importers.append(importer_mapping[source])
except KeyError:
unknown_sources.append(source)
if unknown_sources:
raise CommandError(f"Unknown sources: {unknown_sources}")
return importers
|
28,163 |
def average_rowwise(axs_and_cbaxs: AxesTupleList) -> AxesTupleList:
"""
Take the output plots of plot_by_id and average every heatmap along its
columns. Leave the line plots unchanged.
Args:
axs_and_cbaxs: The output of plot_by_id
Returns:
The same axes and colorbars, but changed to now hold line plots where
once were heatmaps
"""
modified_axs = []
modified_cbaxs = []
for ax, cbax in zip(*axs_and_cbaxs):
mod_ax, mod_cbax = _average_heatmap(ax, cbax, avg_dim='row')
modified_axs.append(mod_ax)
modified_cbaxs.append(mod_cbax)
return modified_axs, modified_cbaxs
|
def average_rowwise(axs_and_cbaxs: AxesTupleList) -> AxesTupleList:
"""
Take the output plots of plot_by_id and average every heatmap along its
rows. Leave the line plots unchanged.
Args:
axs_and_cbaxs: The output of plot_by_id
Returns:
The same axes and colorbars, but changed to now hold line plots where
once were heatmaps
"""
modified_axs = []
modified_cbaxs = []
for ax, cbax in zip(*axs_and_cbaxs):
mod_ax, mod_cbax = _average_heatmap(ax, cbax, avg_dim='row')
modified_axs.append(mod_ax)
modified_cbaxs.append(mod_cbax)
return modified_axs, modified_cbaxs
|
55,268 |
def get_executable_path(name):
"""
Get the path of an executable.
"""
try:
if sys.platform.startswith("win32"):
path = exec_in_terminal(["where", name])
else:
path = exec_in_terminal(["which", name])
except (CalledProcessError, FileNotFoundError):
path = ""
return path
|
def get_executable_path(name):
"""
Get the path of an executable.
"""
# I. Hate. Windows.
if sys.platform.startswith("win32"):
os_which = "where"
else:
os_which = "which"
try:
path = exec_in_terminal([os_which, name])
except (CalledProcessError, FileNotFoundError):
path = ""
return path
|
41,063 |
def main():
parser = get_parser()
args = parser.parse_args()
# logging info
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
logging.info(get_commandline_args())
# check directory
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# load model config
model_dir = os.path.dirname(args.model)
train_args = torch.load(model_dir + "/model.conf")
# load statistics
scaler = StandardScaler()
with h5py.File(model_dir + "/stats.h5") as f:
scaler.mean_ = f["/melspc/mean"][()]
scaler.scale_ = f["/melspc/scale"][()]
# load MLSA coef
with h5py.File(model_dir + "/stats.h5") as f:
avg_mcep = f["mcep/mean"][()]
# define noise shaper
noise_shaper = NoiseShaper(
avg_mcep=avg_mcep,
fs=args.fs,
n_shift=args.n_shift,
)
    # define model and load parameters
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = WaveNet(
n_quantize=train_args.n_quantize,
n_aux=train_args.n_aux,
n_resch=train_args.n_resch,
n_skipch=train_args.n_skipch,
dilation_depth=train_args.dilation_depth,
dilation_repeat=train_args.dilation_repeat,
kernel_size=train_args.kernel_size,
upsampling_factor=train_args.upsampling_factor
)
model.load_state_dict(torch.load(
args.model,
map_location=lambda storage,
loc: storage)["model"])
model.eval()
model.to(device)
for idx, (utt_id, lmspc) in enumerate(
file_reader_helper(args.rspecifier, args.filetype), 1):
logging.info("(%d) %s" % (idx, utt_id))
        # perform preprocessing
        x = encode_mu_law(np.zeros((1)), mu=train_args.n_quantize)  # quantize initial seed waveform
h = scaler.transform(lmspc) # normalize features
# convert to tensor
x = torch.tensor(x, dtype=torch.long, device=device) # (1, )
h = torch.tensor(h, dtype=torch.float, device=device) # (T, n_aux)
# get length of waveform
n_samples = (h.shape[0] - 1) * args.n_shift + args.n_fft
# generate
start_time = time.time()
with torch.no_grad():
y = model.generate(x, h, n_samples, interval=100)
logging.info("generation speed = %s (sec / sample)" % ((time.time() - start_time) / (len(y) - 1)))
y = decode_mu_law(y, mu=train_args.n_quantize)
        # apply noise shaping
y = noise_shaper(y)
# save as .wav file
write(args.outdir + "/%s.wav" % utt_id,
args.fs,
(y * np.iinfo(np.int16).max).astype(np.int16))
|
def main():
parser = get_parser()
args = parser.parse_args()
# logging info
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
logging.info(get_commandline_args())
# check directory
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# load model config
model_dir = os.path.dirname(args.model)
train_args = torch.load(model_dir + "/model.conf")
# load statistics
scaler = StandardScaler()
with h5py.File(model_dir + "/stats.h5") as f:
scaler.mean_ = f["/melspc/mean"][()]
scaler.scale_ = f["/melspc/scale"][()]
# load MLSA coef
with h5py.File(model_dir + "/stats.h5") as f:
avg_mcep = f["mcep/mean"][()]
# define noise shaper
noise_shaper = NoiseShaper(
avg_mcep=avg_mcep,
fs=args.fs,
n_shift=args.n_shift,
)
    # define model and load parameters
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = WaveNet(
n_quantize=train_args.n_quantize,
n_aux=train_args.n_aux,
n_resch=train_args.n_resch,
n_skipch=train_args.n_skipch,
dilation_depth=train_args.dilation_depth,
dilation_repeat=train_args.dilation_repeat,
kernel_size=train_args.kernel_size,
upsampling_factor=train_args.upsampling_factor
)
model.load_state_dict(torch.load(
args.model,
map_location=lambda storage,
loc: storage)["model"])
model.eval()
model.to(device)
for idx, (utt_id, lmspc) in enumerate(
file_reader_helper(args.rspecifier, args.filetype), 1):
logging.info("(%d) %s" % (idx, utt_id))
        # perform preprocessing
        x = encode_mu_law(np.zeros((1)), mu=train_args.n_quantize)  # quantize initial seed waveform
h = scaler.transform(lmspc) # normalize features
# convert to tensor
x = torch.tensor(x, dtype=torch.long, device=device) # (1,)
h = torch.tensor(h, dtype=torch.float, device=device) # (T, n_aux)
# get length of waveform
n_samples = (h.shape[0] - 1) * args.n_shift + args.n_fft
# generate
start_time = time.time()
with torch.no_grad():
y = model.generate(x, h, n_samples, interval=100)
logging.info("generation speed = %s (sec / sample)" % ((time.time() - start_time) / (len(y) - 1)))
y = decode_mu_law(y, mu=train_args.n_quantize)
        # apply noise shaping
y = noise_shaper(y)
# save as .wav file
write(args.outdir + "/%s.wav" % utt_id,
args.fs,
(y * np.iinfo(np.int16).max).astype(np.int16))
|
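The generation loop above seeds the model with a mu-law encoded zero and expands the generated samples with decode_mu_law. A self-contained numpy sketch of standard mu-law companding is given below; the exact quantization convention of the project's own encode_mu_law/decode_mu_law (for instance whether mu counts levels or levels minus one) is an assumption here, not taken from the code above.

import numpy as np

def mu_law_encode(x, n_quantize=256):
    # Compand the waveform in [-1, 1], then quantize to integer bins [0, n_quantize - 1].
    mu = n_quantize - 1
    y = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return np.floor((y + 1) / 2 * mu + 0.5).astype(np.int64)

def mu_law_decode(q, n_quantize=256):
    # Map bins back to [-1, 1], then expand (inverse of the companding above).
    mu = n_quantize - 1
    y = 2 * q.astype(np.float64) / mu - 1
    return np.sign(y) * ((1 + mu) ** np.abs(y) - 1) / mu

x = np.linspace(-1.0, 1.0, 5)
q = mu_law_encode(x)      # integer bins: 0 and 255 at the extremes, 128 near zero
print(q)
print(mu_law_decode(q))   # approximately recovers x (only quantization error remains)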
6,986 |
def get_untranslated(lang, untranslated_file, get_all=False, app='all'):
"""Returns all untranslated strings for a language and writes in a file
:param lang: Language code.
:param untranslated_file: Output file path.
:param get_all: Return all strings, translated or not."""
clear_cache()
apps = frappe.get_all_apps(True)
if app != 'all':
if app in apps:
apps = [app]
else:
print('Application {0} not found!'.format(app))
return
messages = []
untranslated = []
for app_name in apps:
messages.extend(get_messages_for_app(app_name))
messages = deduplicate_messages(messages)
def escape_newlines(s):
return s.replace("\\\n", "|||||").replace("\\n", "||||").replace("\n", "|||")
if get_all:
print(str(len(messages)) + " messages")
with open(untranslated_file, "wb") as f:
for m in messages:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
else:
full_dict = get_full_dict(lang)
for m in messages:
if not full_dict.get(m[1]):
untranslated.append(m[1])
if untranslated:
print(str(len(untranslated)) + " missing translations of " + str(len(messages)))
with open(untranslated_file, "wb") as f:
for m in untranslated:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
else:
print("all translated!")
|
def get_untranslated(lang, untranslated_file, get_all=False, app='_ALL_APPS'):
"""Returns all untranslated strings for a language and writes in a file
:param lang: Language code.
:param untranslated_file: Output file path.
:param get_all: Return all strings, translated or not."""
clear_cache()
apps = frappe.get_all_apps(True)
    if app != '_ALL_APPS':
if app in apps:
apps = [app]
else:
print('Application {0} not found!'.format(app))
return
messages = []
untranslated = []
for app_name in apps:
messages.extend(get_messages_for_app(app_name))
messages = deduplicate_messages(messages)
def escape_newlines(s):
return s.replace("\\\n", "|||||").replace("\\n", "||||").replace("\n", "|||")
if get_all:
print(str(len(messages)) + " messages")
with open(untranslated_file, "wb") as f:
for m in messages:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
else:
full_dict = get_full_dict(lang)
for m in messages:
if not full_dict.get(m[1]):
untranslated.append(m[1])
if untranslated:
print(str(len(untranslated)) + " missing translations of " + str(len(messages)))
with open(untranslated_file, "wb") as f:
for m in untranslated:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
else:
print("all translated!")
|
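The escape_newlines helper above protects embedded line breaks so that each message occupies exactly one line of the output file. A tiny round-trip sketch follows; the unescape direction is a hypothetical inverse (the function above only defines the escaping side), applied longest-marker-first:

def escape_newlines(s):
    return s.replace("\\\n", "|||||").replace("\\n", "||||").replace("\n", "|||")

def unescape_newlines(s):
    # Hypothetical inverse of escape_newlines, replacing the longest markers first.
    return s.replace("|||||", "\\\n").replace("||||", "\\n").replace("|||", "\n")

msg = "First line\nSecond line with a literal \\n marker"
escaped = escape_newlines(msg)
print(escaped)   # First line|||Second line with a literal |||| marker
assert unescape_newlines(escaped) == msg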
8,681 |
def handle_list(options):
"""Display a list of configuration available from Sopel's homedir
:param options: argument parser's parsed option
    This command displays an unordered list of config names from the default
Sopel's homedir, without their extension::
$ sopel-config list
default
custom
It is possible to filter by extension using the ``-e/--ext/--extension``
option, default to ``.cfg`` (the ``.`` prefix is not required).
"""
display_path = getattr(options, 'display_path', False)
extension = getattr(options, 'extension', '.cfg')
if not extension.startswith('.'):
extension = '.' + extension
configs = utils.enumerate_configs(config.DEFAULT_HOMEDIR, extension)
for config_filename in configs:
if display_path:
print(os.path.join(config.DEFAULT_HOMEDIR, config_filename))
else:
name, _ = os.path.splitext(config_filename)
print(name)
|
def handle_list(options):
"""Display a list of configuration available from Sopel's homedir
:param options: argument parser's parsed option
    This command displays an unordered list of config names from the default
Sopel's homedir, without their extension::
$ sopel-config list
default
custom
It is possible to filter by extension using the ``-e/--ext/--extension``
option; default is ``.cfg`` (the ``.`` prefix is not required).
"""
display_path = getattr(options, 'display_path', False)
extension = getattr(options, 'extension', '.cfg')
if not extension.startswith('.'):
extension = '.' + extension
configs = utils.enumerate_configs(config.DEFAULT_HOMEDIR, extension)
for config_filename in configs:
if display_path:
print(os.path.join(config.DEFAULT_HOMEDIR, config_filename))
else:
name, _ = os.path.splitext(config_filename)
print(name)
|
33,236 |
def test_auto_model_graph(model):
assert model['Name'] == 'ds005_mixedgamblestask'
# run level
block = model['Nodes'][0]
assert block['Name'] == 'Run'
assert block['Level'] == 'Run'
assert block['Model']['Type'] == 'glm'
assert block['Transformations']['Transformer'] == 'pybids-transforms-v1'
assert block['Transformations']['Instructions'][0]['Name'] == 'Factor'
assert block['Contrasts'][0]['Name'] == 'run_parametric gain'
assert block['Contrasts'][0]['Weights'] == [1]
assert block['Contrasts'][0]['Test'] == 't'
# subject level
block = model['Nodes'][1]
assert block['Name'] == 'Subject'
assert block['Level'] == 'Subject'
assert block['Model']['Type'] == 'meta'
assert block['Model']['X'][0] == 'run_parametric gain'
assert block['Contrasts'][0]['Name'] == 'subject_run_parametric gain'
assert block['Contrasts'][0]['Test'] == 't'
# dataset level
block = model['Nodes'][2]
assert block['Name'] == 'Dataset'
assert block['Level'] == 'Dataset'
assert block['Model']['Type'] == 'meta'
assert block['Model']['X'][0] == 'subject_run_parametric gain'
assert block['Contrasts'][0]['Name'] == 'dataset_subject_run_parametric gain'
assert block['Contrasts'][0]['Test'] == 't'
|
def test_auto_model_graph(model):
assert model['Name'] == 'ds005_mixedgamblestask'
# run level
block = model['Nodes'][0]
assert block['Name'] == 'Run'
assert block['Level'] == 'Run'
assert block['Model']['Type'] == 'glm'
assert block['Transformations']['Transformer'] == 'pybids-transforms-v1'
assert block['Transformations']['Instructions'][0]['Name'] == 'Factor'
assert block['Contrasts'][0]['Name'] == 'run_parametric gain'
assert block['Contrasts'][0]['Weights'] == [1]
assert block['Contrasts'][0]['Test'] == 't'
# subject level
block = model['Nodes'][1]
assert block['Name'] == 'Subject'
assert block['Level'] == 'Subject'
assert block['Model']['Type'] == 'meta'
assert block['Model']['X'][0] == 'run_parametric gain'
assert block['Contrasts'][0]['Name'] == 'subject_run_parametric gain'
assert block['Contrasts'][0]['Test'] == 't'
# dataset level
block = model['Nodes'][2]
assert block['Name'] == 'Dataset'
assert block['Level'] == 'Dataset'
assert block['Model']['Type'] == 'glm'
assert block['Model']['X'][0] == 'subject_run_parametric gain'
assert block['Contrasts'][0]['Name'] == 'dataset_subject_run_parametric gain'
assert block['Contrasts'][0]['Test'] == 't'
|
50,331 |
def _prepare_view_identifier(dialect, view_name, schema=None):
quoted_view_name = dialect.identifier_preparer.quote(view_name)
if schema:
return dialect.identifier_preparer.quote_schema(schema) + '.' + quoted_view_name
else:
return quoted_view_name
|
def _prepare_view_identifier(dialect, view_name, schema=None):
quoted_view_name = dialect.identifier_preparer.quote(view_name)
if schema:
return dialect.identifier_preparer.quote_schema(schema) + '.' + quoted_view_name
return quoted_view_name
|
38,268 |
def serviceUrl(svc, ssl=False):
port = int(os.environ['{}_SERVICE_PORT'.fornat(svc.upper())])
host = os.environ['{}_SERVICE_HOST'.format(svc.upper())]
schema = 'https' if ssl else 'http'
ipv6 = ':' in host
return ('{}://[{}]:{}' if ipv6 else '{}://{}:{}').format(schema, host, port)
|
def serviceUrl(svc, ssl=False):
port = int(os.environ['{}_SERVICE_PORT'.format(svc.upper())])
host = os.environ['{}_SERVICE_HOST'.format(svc.upper())]
schema = 'https' if ssl else 'http'
ipv6 = ':' in host
return ('{}://[{}]:{}' if ipv6 else '{}://{}:{}').format(schema, host, port)
|
53,579 |
def regression_6211(x: int = 0) -> None:
"""This is a regression test for issue #6211.
False negative of "missing param doc" was being issued when "default" used in
NumPy-style docs. This test should return no errors.
See https://github.com/PyCQA/pylint/issues/6211
Parameter
---------
x : int, default 0
The x parameter
"""
print(x)
|
def regression_6211(number: int = 0) -> None:
"""This is a regression test for issue #6211.
False negative of "missing param doc" was being issued when "default" used in
NumPy-style docs. This test should return no errors.
See https://github.com/PyCQA/pylint/issues/6211
Parameter
---------
number : int, default 0
The number parameter
"""
print(number)
|
31,175 |
def get_indicators_context(incident):
file_context: List[Any] = []
process_context: List[Any] = []
ip_context: List[Any] = []
for alert in incident.get('alerts'):
# file context
file_details = {
'Name': alert.get('action_file_name'),
'Path': alert.get('action_file_path'),
            'SHA256': alert.get('action_file_sha256'),
'MD5': alert.get('action_file_md5')
}
remove_nulls_from_dictionary(file_details)
if file_details:
file_context.append(file_details)
# process context
process_types = ['actor', 'os_actor', 'causality_actor', 'action']
for process_type in process_types:
single_process_context = get_process_context(alert, process_type)
if single_process_context:
process_context.append(single_process_context)
# ip context
add_to_ip_context(alert, ip_context)
network_artifacts = incident.get('network_artifacts', [])
domain_context = create_context_from_network_artifacts(network_artifacts, ip_context)
file_artifacts = incident.get('file_artifacts', [])
for file in file_artifacts:
file_context.append({
'Name': file.get('file_name'),
'SHA256': file.get('file_sha256')
})
return file_context, process_context, domain_context, ip_context
|
def get_indicators_context(incident):
file_context: List[Any] = []
process_context: List[Any] = []
ip_context: List[Any] = []
for alert in incident.get('alerts', []):
# file context
file_details = {
'Name': alert.get('action_file_name'),
'Path': alert.get('action_file_path'),
            'SHA256': alert.get('action_file_sha256'),
'MD5': alert.get('action_file_md5')
}
remove_nulls_from_dictionary(file_details)
if file_details:
file_context.append(file_details)
# process context
process_types = ['actor', 'os_actor', 'causality_actor', 'action']
for process_type in process_types:
single_process_context = get_process_context(alert, process_type)
if single_process_context:
process_context.append(single_process_context)
# ip context
add_to_ip_context(alert, ip_context)
network_artifacts = incident.get('network_artifacts', [])
domain_context = create_context_from_network_artifacts(network_artifacts, ip_context)
file_artifacts = incident.get('file_artifacts', [])
for file in file_artifacts:
file_context.append({
'Name': file.get('file_name'),
'SHA256': file.get('file_sha256')
})
return file_context, process_context, domain_context, ip_context
|
31,554 |
def main() -> None:
"""main function, parses params and runs command functions
"""
params = demisto.params()
command = demisto.command()
api_key = params.get("apikey")
base_url = urljoin(params.get("url", "").rstrip("/"), "/api")
verify_certificate = not params.get("insecure", False)
proxy = params.get("proxy", False)
try:
client = Client(
api_key=api_key, base_url=base_url, verify=verify_certificate, proxy=proxy
)
client.authenticate()
if command == "test-module":
result = test_module(client)
return_results(result)
elif command == 'fetch-incidents':
max_incidents = check_int(arg=params.get('max_fetch'), arg_name='max_fetch',
min_val=None, max_val=None, required=False)
if not max_incidents or max_incidents > MAX_INCIDENTS:
max_incidents = MAX_INCIDENTS
ff = params.get('first_fetch', DEFAULT_FIRST_FETCH)
if not ff:
raise ValueError('firstFetch not specified')
first_fetch = datestring_to_timestamp_us(ff)
priority = params.get('priority')
activity_status = params.get('activity_status')
progress_status = params.get('progress_status')
business_units = argToList(params.get('business_unit'))
issue_types = argToList(params.get('issue_type'))
tags = argToList(params.get('tag'))
cloud_management_status = argToList(params.get('cloud_management_status'))
sync_tags = argToList(params.get('sync_tags'))
fetch_details: bool = True # forced to True to retrieve the proper asset IDs
mirror_direction = MIRROR_DIRECTION.get(params.get('mirror_direction', 'None'))
next_run, incidents = fetch_incidents(
client=client,
max_incidents=max_incidents,
last_run=demisto.getLastRun(),
first_fetch=first_fetch,
priority=priority,
activity_status=activity_status,
progress_status=progress_status,
business_units=business_units,
tags=tags,
cloud_management_status=cloud_management_status,
issue_types=issue_types,
mirror_direction=mirror_direction,
sync_tags=sync_tags,
fetch_details=fetch_details
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
# To be compatible with 6.1
elif command == "get-modified-remote-data":
raise DemistoException('get-modified-remote-data not implemented')
elif command == "get-remote-data":
sync_owners = argToBoolean(params.get('sync_owners'))
# XXX: mirror_details forced to be disabled to reduce API calls in the backend.
# Will be reviewed in next versions to use XSOAR 6.1 mirroring enhancements.
mirror_details = False
# mirror_details = argToBoolean(params.get('mirror_details'))
incoming_tags = argToList(params.get('incoming_tags'))
return_results(get_remote_data_command(client, demisto.args(), sync_owners, incoming_tags, mirror_details))
elif command == "update-remote-system":
sync_owners = argToBoolean(params.get('sync_owners'))
return_results(update_remote_system_command(client, demisto.args(), sync_owners))
elif command == "expanse-get-issues":
return_results(get_issues_command(client, demisto.args()))
elif command == "expanse-get-issue":
return_results(get_issue_command(client, demisto.args()))
elif command == "expanse-get-issue-updates":
return_results(get_issue_updates_command(client, demisto.args()))
elif command == "expanse-update-issue":
return_results(update_issue_command(client, demisto.args()))
elif command == "expanse-get-issue-comments":
return_results(get_issue_comments_command(client, demisto.args()))
elif command == "expanse-list-businessunits":
return_results(list_businessunits_command(client, demisto.args()))
elif command == "expanse-list-providers":
return_results(list_providers_command(client, demisto.args()))
elif command == "expanse-list-tags":
return_results(list_tags_command(client, demisto.args()))
elif command == "expanse-get-iprange":
return_results(get_iprange_command(client, demisto.args()))
elif command == "expanse-create-tag":
return_results(create_tag_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-asset":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-asset":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-iprange":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-iprange":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-certificate":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-certificate":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-domain":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-domain":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-get-domain":
return_results(get_domain_command(client, demisto.args()))
elif command == "expanse-get-certificate":
return_results(get_certificate_command(client, demisto.args()))
elif command == "expanse-get-associated-domains":
return_results(get_associated_domains_command(client, demisto.args()))
elif command == "certificate":
return_results(certificate_command(client, demisto.args()))
elif command == "domain":
return_results(domain_command(client, demisto.args()))
elif command == "ip":
return_results(ip_command(client, demisto.args()))
elif command == "cidr":
return_results(cidr_command(client, demisto.args()))
elif command == "expanse-get-cloud-resources":
return_results(get_cloud_resource_command(client, demisto.args()))
elif command == "expanse-get-risky-flows":
return_results(get_risky_flows_command(client, demisto.args()))
elif command == "expanse-list-risk-rules":
return_results(list_risk_rules_command(client, demisto.args()))
elif command == "expanse-get-services":
return_results(get_services_command(client, demisto.args()))
elif command == "expanse-get-service":
return_results(get_service_command(client, demisto.args()))
elif command == "expanse-list-pocs":
return_results(list_pocs_command(client, demisto.args()))
elif command == "expanse-create-poc":
return_results(create_poc_command(client, demisto.args()))
elif command == "expanse-assign-pocs-to-asset":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-asset":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-assign-pocs-to-iprange":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-iprange":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-assign-pocs-to-certificate":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-certificate":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-assign-pocs-to-domain":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-domain":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_pocs_command(client, args))
elif command == 'expanse-get-domains-for-certificate':
return_results(domains_for_certificate_command(client, demisto.args()))
else:
raise NotImplementedError(f'Command {command} is not implemented.')
# Log exceptions and return errors
except Exception as e:
# To be compatible with 6.1
if 'not implemented' in str(e):
raise e
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {command} command.\nError:\n{str(e)}"
)
|
def main() -> None:
"""main function, parses params and runs command functions
"""
params = demisto.params()
command = demisto.command()
api_key = params.get("apikey")
base_url = urljoin(params.get("url", "").rstrip("/"), "/api")
verify_certificate = not params.get("insecure", False)
proxy = params.get("proxy", False)
try:
client = Client(
api_key=api_key, base_url=base_url, verify=verify_certificate, proxy=proxy
)
client.authenticate()
if command == "test-module":
result = test_module(client)
return_results(result)
elif command == 'fetch-incidents':
max_incidents = check_int(arg=params.get('max_fetch'), arg_name='max_fetch',
min_val=None, max_val=None, required=False)
if not max_incidents or max_incidents > MAX_INCIDENTS:
max_incidents = MAX_INCIDENTS
ff = params.get('first_fetch', DEFAULT_FIRST_FETCH)
if not ff:
raise ValueError('firstFetch not specified')
first_fetch = datestring_to_timestamp_us(ff)
priority = params.get('priority')
activity_status = params.get('activity_status')
progress_status = params.get('progress_status')
business_units = argToList(params.get('business_unit'))
issue_types = argToList(params.get('issue_type'))
tags = argToList(params.get('tag'))
cloud_management_status = params.get('cloud_management_status')
sync_tags = argToList(params.get('sync_tags'))
fetch_details: bool = True # forced to True to retrieve the proper asset IDs
mirror_direction = MIRROR_DIRECTION.get(params.get('mirror_direction', 'None'))
next_run, incidents = fetch_incidents(
client=client,
max_incidents=max_incidents,
last_run=demisto.getLastRun(),
first_fetch=first_fetch,
priority=priority,
activity_status=activity_status,
progress_status=progress_status,
business_units=business_units,
tags=tags,
cloud_management_status=cloud_management_status,
issue_types=issue_types,
mirror_direction=mirror_direction,
sync_tags=sync_tags,
fetch_details=fetch_details
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
# To be compatible with 6.1
elif command == "get-modified-remote-data":
raise DemistoException('get-modified-remote-data not implemented')
elif command == "get-remote-data":
sync_owners = argToBoolean(params.get('sync_owners'))
# XXX: mirror_details forced to be disabled to reduce API calls in the backend.
# Will be reviewed in next versions to use XSOAR 6.1 mirroring enhancements.
mirror_details = False
# mirror_details = argToBoolean(params.get('mirror_details'))
incoming_tags = argToList(params.get('incoming_tags'))
return_results(get_remote_data_command(client, demisto.args(), sync_owners, incoming_tags, mirror_details))
elif command == "update-remote-system":
sync_owners = argToBoolean(params.get('sync_owners'))
return_results(update_remote_system_command(client, demisto.args(), sync_owners))
elif command == "expanse-get-issues":
return_results(get_issues_command(client, demisto.args()))
elif command == "expanse-get-issue":
return_results(get_issue_command(client, demisto.args()))
elif command == "expanse-get-issue-updates":
return_results(get_issue_updates_command(client, demisto.args()))
elif command == "expanse-update-issue":
return_results(update_issue_command(client, demisto.args()))
elif command == "expanse-get-issue-comments":
return_results(get_issue_comments_command(client, demisto.args()))
elif command == "expanse-list-businessunits":
return_results(list_businessunits_command(client, demisto.args()))
elif command == "expanse-list-providers":
return_results(list_providers_command(client, demisto.args()))
elif command == "expanse-list-tags":
return_results(list_tags_command(client, demisto.args()))
elif command == "expanse-get-iprange":
return_results(get_iprange_command(client, demisto.args()))
elif command == "expanse-create-tag":
return_results(create_tag_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-asset":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-asset":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-iprange":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-iprange":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-certificate":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-certificate":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-assign-tags-to-domain":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-unassign-tags-from-domain":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_tags_command(client, demisto.args()))
elif command == "expanse-get-domain":
return_results(get_domain_command(client, demisto.args()))
elif command == "expanse-get-certificate":
return_results(get_certificate_command(client, demisto.args()))
elif command == "expanse-get-associated-domains":
return_results(get_associated_domains_command(client, demisto.args()))
elif command == "certificate":
return_results(certificate_command(client, demisto.args()))
elif command == "domain":
return_results(domain_command(client, demisto.args()))
elif command == "ip":
return_results(ip_command(client, demisto.args()))
elif command == "cidr":
return_results(cidr_command(client, demisto.args()))
elif command == "expanse-get-cloud-resources":
return_results(get_cloud_resource_command(client, demisto.args()))
elif command == "expanse-get-risky-flows":
return_results(get_risky_flows_command(client, demisto.args()))
elif command == "expanse-list-risk-rules":
return_results(list_risk_rules_command(client, demisto.args()))
elif command == "expanse-get-services":
return_results(get_services_command(client, demisto.args()))
elif command == "expanse-get-service":
return_results(get_service_command(client, demisto.args()))
elif command == "expanse-list-pocs":
return_results(list_pocs_command(client, demisto.args()))
elif command == "expanse-create-poc":
return_results(create_poc_command(client, demisto.args()))
elif command == "expanse-assign-pocs-to-asset":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-asset":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-assign-pocs-to-iprange":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-iprange":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'IpRange'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-assign-pocs-to-certificate":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-certificate":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Certificate'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-assign-pocs-to-domain":
args = demisto.args()
args['operation_type'] = 'ASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_pocs_command(client, args))
elif command == "expanse-unassign-pocs-from-domain":
args = demisto.args()
args['operation_type'] = 'UNASSIGN'
args['asset_type'] = 'Domain'
return_results(manage_asset_pocs_command(client, args))
elif command == 'expanse-get-domains-for-certificate':
return_results(domains_for_certificate_command(client, demisto.args()))
else:
raise NotImplementedError(f'Command {command} is not implemented.')
# Log exceptions and return errors
except Exception as e:
# To be compatible with 6.1
if 'not implemented' in str(e):
raise e
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {command} command.\nError:\n{str(e)}"
)
|
25,609 |
def test_locked_transfer_unknown_metadata():
signer = LocalSigner(bytes(range(32)))
additional_data = {
"arbitrary_data": {"new_key": "you didn't expect this, didn't you?"},
# also check that an additional "unknown_data" does not cause an overwrite of
# the "unknown_data" field on the dataclass used for temporary storage of the raw data
"unknown_data": {"test": "123"},
}
    # First construct the "original" metadata with processable routes
route_only_metadata = factories.create(factories.MetadataProperties())
metadata_dict = route_only_metadata.to_dict()
    # Add the additional, unknown data to the serialized dict
metadata_original_dict = {**metadata_dict, **additional_data}
# Now construct the LockedTransfer that includes the additional, unexpected metadata fields
metadata_properties = factories.MetadataProperties(original_data=metadata_original_dict)
metadata = factories.create(metadata_properties)
locked_transfer = factories.create(factories.LockedTransferProperties(metadata=metadata))
locked_transfer.sign(signer)
json_msg = MessageSerializer.serialize(locked_transfer)
deserialized_message = MessageSerializer.deserialize(json_msg)
assert deserialized_message.sender == signer.address
assert len(deserialized_message.metadata.routes) == 1
assert deserialized_message.metadata.routes == route_only_metadata.routes
    # The assert output is more readable when we use dicts rather than plain JSON
message_dict = JSONSerializer.deserialize(MessageSerializer.serialize(deserialized_message))
# Explicitly check for the unknown data
metadata_dict = message_dict["metadata"]
for key, value in additional_data.items():
deserialized_value = metadata_dict.get(key)
assert deserialized_value == value
assert message_dict == JSONSerializer.deserialize(json_msg)
|
def test_locked_transfer_unknown_metadata():
signer = LocalSigner(bytes(range(32)))
additional_data = {
"arbitrary_data": {"new_key": "you didn't expect this, did you?"},
# also check that an additional "unknown_data" does not cause an overwrite of
# the "unknown_data" field on the dataclass used for temporary storage of the raw data
"unknown_data": {"test": "123"},
}
    # First construct the "original" metadata with processable routes
route_only_metadata = factories.create(factories.MetadataProperties())
metadata_dict = route_only_metadata.to_dict()
    # Add the additional, unknown data to the serialized dict
metadata_original_dict = {**metadata_dict, **additional_data}
# Now construct the LockedTransfer that includes the additional, unexpected metadata fields
metadata_properties = factories.MetadataProperties(original_data=metadata_original_dict)
metadata = factories.create(metadata_properties)
locked_transfer = factories.create(factories.LockedTransferProperties(metadata=metadata))
locked_transfer.sign(signer)
json_msg = MessageSerializer.serialize(locked_transfer)
deserialized_message = MessageSerializer.deserialize(json_msg)
assert deserialized_message.sender == signer.address
assert len(deserialized_message.metadata.routes) == 1
assert deserialized_message.metadata.routes == route_only_metadata.routes
    # The assert output is more readable when we use dicts rather than plain JSON
message_dict = JSONSerializer.deserialize(MessageSerializer.serialize(deserialized_message))
# Explicitly check for the unknown data
metadata_dict = message_dict["metadata"]
for key, value in additional_data.items():
deserialized_value = metadata_dict.get(key)
assert deserialized_value == value
assert message_dict == JSONSerializer.deserialize(json_msg)
|
58,836 |
def pinv(a, rcond=1e-15):
"""Compute the Moore-Penrose pseudoinverse of a matrix.
It computes a pseudoinverse of a matrix ``a``, which is a generalization
of the inverse matrix with Singular Value Decomposition (SVD).
Note that it automatically removes small singular values for stability.
Args:
a (cupy.ndarray): The matrix with dimension ``(..., M, N)``
rcond (float or cupy.ndarray): Cutoff parameter for small singular
values. For stability it computes the largest singular value
denoted by ``s``, and sets all singular values smaller than
``s`` to zero. Broadcasts against the stack of matrices.
Returns:
cupy.ndarray: The pseudoinverse of ``a`` with dimension
``(..., N, M)``.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.pinv`
"""
if a.size == 0:
m, n = a.shape[-2:]
return cupy.empty(a.shape[:-2] + (n, m), dtype=a.dtype)
u, s, vt = _decomposition.svd(a.conj(), full_matrices=False)
# discard small singular values
if cupy.isscalar(rcond):
rcond = cupy.asarray(rcond)
cutoff = rcond[..., None] * cupy.amax(s, axis=-1, keepdims=True)
leq = s <= cutoff
s = 1 / s
s[leq] = 0
return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1))
|
def pinv(a, rcond=1e-15):
"""Compute the Moore-Penrose pseudoinverse of a matrix.
It computes a pseudoinverse of a matrix ``a``, which is a generalization
of the inverse matrix with Singular Value Decomposition (SVD).
Note that it automatically removes small singular values for stability.
Args:
a (cupy.ndarray): The matrix with dimension ``(..., M, N)``
rcond (float or cupy.ndarray): Cutoff parameter for small singular
values. For stability it computes the largest singular value
denoted by ``s``, and sets all singular values smaller than
            ``rcond * s`` to zero. Broadcasts against the stack of matrices.
Returns:
cupy.ndarray: The pseudoinverse of ``a`` with dimension
``(..., N, M)``.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.pinv`
"""
if a.size == 0:
m, n = a.shape[-2:]
return cupy.empty(a.shape[:-2] + (n, m), dtype=a.dtype)
u, s, vt = _decomposition.svd(a.conj(), full_matrices=False)
# discard small singular values
cutoff = rcond * cupy.amax(s, axis=-1)
leq = s <= cutoff[..., None]
s = 1 / s
s[leq] = 0
return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1))
|
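The cutoff logic above can be checked with a plain NumPy analogue (a sketch mirroring the CuPy code, not CuPy itself): singular values at or below rcond times the largest one are zeroed before the pseudoinverse is reassembled.
import numpy as np

def pinv_sketch(a, rcond=1e-15):
    u, s, vt = np.linalg.svd(a.conj(), full_matrices=False)
    cutoff = rcond * np.amax(s, axis=-1, keepdims=True)
    s_inv = np.zeros_like(s)
    big = s > cutoff
    s_inv[big] = 1.0 / s[big]          # discard small singular values
    return np.matmul(vt.swapaxes(-2, -1), s_inv[..., None] * u.swapaxes(-2, -1))

a = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
assert np.allclose(a @ pinv_sketch(a) @ a, a)   # Moore-Penrose property: A A+ A == A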
933 |
def failing_assumptions(expr, **assumptions):
"""
Return a dictionary containing assumptions with values not
matching those of the passed assumptions.
Examples
========
>>> from sympy import failing_assumptions, Symbol
>>> x = Symbol('x', positive=True)
>>> y = Symbol('y')
>>> failing_assumptions(6*x + y, positive=True)
{'positive': None, 'real': None}
>>> failing_assumptions(x**2 - 1, positive=True)
{'positive': None}
If *expr* satisfies all of the assumptions, an empty dictionary is returned.
>>> failing_assumptions(x**2, positive=True)
{}
"""
expr = sympify(expr)
failed = {}
for k in assumptions:
test = getattr(expr, 'is_%s' % k, None)
if test is not assumptions[k]:
failed[k] = test
return failed # {} or {assumption: value != desired}
|
def failing_assumptions(expr, **assumptions):
"""
Return a dictionary containing assumptions with values not
matching those of the passed assumptions.
Examples
========
>>> from sympy import failing_assumptions, Symbol
>>> x = Symbol('x', positive=True)
>>> y = Symbol('y')
>>> failing_assumptions(6*x + y, positive=True)
{'positive': None}
>>> failing_assumptions(x**2 - 1, positive=True)
{'positive': None}
If *expr* satisfies all of the assumptions, an empty dictionary is returned.
>>> failing_assumptions(x**2, positive=True)
{}
"""
expr = sympify(expr)
failed = {}
for k in assumptions:
test = getattr(expr, 'is_%s' % k, None)
if test is not assumptions[k]:
failed[k] = test
return failed # {} or {assumption: value != desired}
|
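The assumption probing above is just an `is_<name>` attribute lookup, with None meaning "unknown". A hypothetical stand-in object makes the behaviour easy to see without SymPy:
class FakeExpr:
    # hypothetical stand-in for a SymPy expression with assumption attributes
    is_positive = None   # unknown
    is_integer = True

def failing(expr, **assumptions):
    return {k: getattr(expr, 'is_%s' % k, None)
            for k, want in assumptions.items()
            if getattr(expr, 'is_%s' % k, None) is not want}

assert failing(FakeExpr(), positive=True, integer=True) == {'positive': None}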
31,068 |
def main():
install_logging('Destroy_instances.log')
circle_aritfact = sys.argv[1]
env_file = sys.argv[2]
instance_role = sys.argv[3]
time_to_live = sys.argv[4]
with open(env_file, 'r') as json_file:
env_results = json.load(json_file)
filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role]
for env in filtered_results:
logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo chmod -R 755 /var/log/demisto"'
scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
'{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'
try:
logging.debug(f'Trying to run {ssh_string}')
subprocess.check_output(
ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
try:
server_ip = env["InstanceDNS"].split('.')[0]
subprocess.check_output(
scp_string.format(
env["SSHuser"],
env["InstanceDNS"],
"{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)),
shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
if time_to_live:
logging.info(f'Skipping - Time to live was set to {time_to_live} minutes')
continue
if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))):
logging.info(f'Destroying instance {env.get("Role", "Unknown role")}')
rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"])
if aws_functions.isError(rminstance):
logging.error(rminstance)
else:
logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
|
def main():
install_logging('Destroy_instances.log')
circle_aritfact = sys.argv[1]
env_file = sys.argv[2]
instance_role = sys.argv[3]
time_to_live = sys.argv[4]
with open(env_file, 'r') as json_file:
env_results = json.load(json_file)
filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role]
for env in filtered_results:
logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo chmod -R 755 /var/log/demisto"'
scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
'{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'
try:
logging.debug(f'Trying to run {ssh_string}')
subprocess.check_output(
ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
try:
server_ip = env["InstanceDNS"].split('.')[0]
subprocess.check_output(
scp_string.format(
env["SSHuser"],
env["InstanceDNS"],
"{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)),
shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
if time_to_live:
logging.info(f'Skipping - Time to live was set to {time_to_live} minutes')
continue
if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))):
logging.info(f'Destroying instance {env.get("Role", "Unknown role")}')
rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"])
if aws_functions.isError(rminstance):
logging.error(rminstance["Message"])
else:
logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
|
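Both versions rely on the same log-and-continue pattern around subprocess.check_output. A minimal sketch of that pattern, assuming a POSIX shell and hypothetical commands:
import logging
import subprocess

def run_quietly(cmd):
    """Run a shell command and log its output on failure instead of raising."""
    try:
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        logging.exception(exc.output)

run_quietly('echo ok')    # succeeds silently
run_quietly('exit 1')     # failure is logged; a surrounding loop would keep going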
54,523 |
def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
study_json_fn = os.path.join(args.out_dir, "studies.json")
subprocess.check_call(f"echo >| {study_json_fn}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
subprocess.check_call(f"echo >| {problems_filename}", shell=True)
# Create HPO bench problem.
datasets = [
"fcnet_tabular_benchmarks/fcnet_naval_propulsion_data.hdf5",
"fcnet_tabular_benchmarks/fcnet_parkinsons_telemonitoring_data.hdf5",
"fcnet_tabular_benchmarks/fcnet_protein_structure_data.hdf5",
"fcnet_tabular_benchmarks/fcnet_slice_localization_data.hdf5",
]
for dataset in datasets:
dataset = os.path.join(args.data_dir, dataset)
cmd = f'{kurobako_cmd} problem hpobench "{dataset}" | tee -a {problems_filename}'
subprocess.run(cmd, shell=True)
# Create NAS bench problem.
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = f'{kurobako_cmd} problem nasbench "{dataset}" | tee -a {problems_filename}'
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
pruner_list = args.pruner_list.split()
pruner_kwargs_list = args.pruner_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
if len(pruner_list) != len(pruner_kwargs_list):
raise ValueError(
"The number of pruners does not match the given keyword arguments. \n"
f"pruner_list: {pruner_list}, pruner_keyword_arguments: {pruner_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
for pruner, pruner_kwargs in zip(pruner_list, pruner_kwargs_list):
name = f"{args.name_prefix}_{sampler}_{pruner}"
cmd = (
f"{kurobako_cmd} solver --name {name} optuna --loglevel debug "
f"--sampler {sampler} --sampler-kwargs {sampler_kwargs} "
f"--pruner {pruner} --pruner-kwargs {pruner_kwargs} "
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 80 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_fn}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
cmd = (
f"cat {result_filename} | {kurobako_cmd} plot curve --errorbar -o {args.out_dir} --xmin 10"
)
subprocess.run(cmd, shell=True)
|
def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
study_json_fn = os.path.join(args.out_dir, "studies.json")
subprocess.check_call(f"echo >| {study_json_fn}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
with open(problems_filename,'w') as f:
pass
# Create HPO bench problem.
datasets = [
"fcnet_tabular_benchmarks/fcnet_naval_propulsion_data.hdf5",
"fcnet_tabular_benchmarks/fcnet_parkinsons_telemonitoring_data.hdf5",
"fcnet_tabular_benchmarks/fcnet_protein_structure_data.hdf5",
"fcnet_tabular_benchmarks/fcnet_slice_localization_data.hdf5",
]
for dataset in datasets:
dataset = os.path.join(args.data_dir, dataset)
cmd = f'{kurobako_cmd} problem hpobench "{dataset}" | tee -a {problems_filename}'
subprocess.run(cmd, shell=True)
# Create NAS bench problem.
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = f'{kurobako_cmd} problem nasbench "{dataset}" | tee -a {problems_filename}'
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
pruner_list = args.pruner_list.split()
pruner_kwargs_list = args.pruner_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
if len(pruner_list) != len(pruner_kwargs_list):
raise ValueError(
"The number of pruners does not match the given keyword arguments. \n"
f"pruner_list: {pruner_list}, pruner_keyword_arguments: {pruner_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
for pruner, pruner_kwargs in zip(pruner_list, pruner_kwargs_list):
name = f"{args.name_prefix}_{sampler}_{pruner}"
cmd = (
f"{kurobako_cmd} solver --name {name} optuna --loglevel debug "
f"--sampler {sampler} --sampler-kwargs {sampler_kwargs} "
f"--pruner {pruner} --pruner-kwargs {pruner_kwargs} "
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 80 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_fn}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
cmd = (
f"cat {result_filename} | {kurobako_cmd} plot curve --errorbar -o {args.out_dir} --xmin 10"
)
subprocess.run(cmd, shell=True)
|
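The behavioural difference between the two versions is how problems.json is truncated: open(path, 'w') is the portable, shell-free equivalent of the `echo >| path` trick (minus the trailing newline echo writes). A tiny sketch:
import os
import tempfile

out_dir = tempfile.mkdtemp()
problems_filename = os.path.join(out_dir, "problems.json")
# portable truncation: create the file empty, or empty it if it already exists
with open(problems_filename, "w"):
    pass
assert os.path.getsize(problems_filename) == 0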
43,227 |
def test_pickle():
size = 100
vbuf = ReplayBuffer(size, stack_num=2)
lbuf = ListReplayBuffer()
pbuf = PrioritizedReplayBuffer(size, 0.6, 0.4)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
rew = torch.tensor([1.]).to(device)
print(rew)
for i in range(4):
vbuf.add(obs=Batch(index=np.array([i])), act=0, rew=rew, done=0)
for i in range(3):
lbuf.add(obs=Batch(index=np.array([i])), act=1, rew=rew, done=0)
for i in range(5):
pbuf.add(obs=Batch(index=np.array([i])),
act=2, rew=rew, done=0, weight=np.random.rand())
# save
pickle.dump(vbuf, open('/tmp/vbuf.pkl', 'wb'))
pickle.dump(lbuf, open('/tmp/lbuf.pkl', 'wb'))
pickle.dump(pbuf, open('/tmp/pbuf.pkl', 'wb'))
# load
_vbuf = pickle.load(open('/tmp/vbuf.pkl', 'rb'))
_lbuf = pickle.load(open('/tmp/lbuf.pkl', 'rb'))
_pbuf = pickle.load(open('/tmp/pbuf.pkl', 'rb'))
assert len(_vbuf) == len(vbuf) and np.allclose(_vbuf.act, vbuf.act)
assert len(_lbuf) == len(lbuf) and np.allclose(_lbuf.act, lbuf.act)
assert len(_pbuf) == len(pbuf) and np.allclose(_pbuf.act, pbuf.act)
# make sure the meta var is identical
assert _vbuf.stack_num == vbuf.stack_num
assert np.allclose(_pbuf.weight[np.arange(len(_pbuf))],
pbuf.weight[np.arange(len(pbuf))])
|
def test_pickle():
size = 100
vbuf = ReplayBuffer(size, stack_num=2)
lbuf = ListReplayBuffer()
pbuf = PrioritizedReplayBuffer(size, 0.6, 0.4)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
rew = torch.tensor([1.]).to(device)
print(rew)
for i in range(4):
vbuf.add(obs=Batch(index=np.array([i])), act=0, rew=rew, done=0)
for i in range(3):
lbuf.add(obs=Batch(index=np.array([i])), act=1, rew=rew, done=0)
for i in range(5):
pbuf.add(obs=Batch(index=np.array([i])),
act=2, rew=rew, done=0, weight=np.random.rand())
# save
pickle.dump(vbuf, open('/tmp/vbuf.pkl', 'wb'))
ls = pickle.dumps(lbuf)
pickle.dump(pbuf, open('/tmp/pbuf.pkl', 'wb'))
# load
_vbuf = pickle.load(open('/tmp/vbuf.pkl', 'rb'))
_lbuf = pickle.load(open('/tmp/lbuf.pkl', 'rb'))
_pbuf = pickle.load(open('/tmp/pbuf.pkl', 'rb'))
assert len(_vbuf) == len(vbuf) and np.allclose(_vbuf.act, vbuf.act)
assert len(_lbuf) == len(lbuf) and np.allclose(_lbuf.act, lbuf.act)
assert len(_pbuf) == len(pbuf) and np.allclose(_pbuf.act, pbuf.act)
# make sure the meta var is identical
assert _vbuf.stack_num == vbuf.stack_num
assert np.allclose(_pbuf.weight[np.arange(len(_pbuf))],
pbuf.weight[np.arange(len(pbuf))])
|
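The temp-file round trips above can also be done purely in memory with pickle.dumps/pickle.loads; a standard-library sketch with a plain dict standing in for a buffer object:
import pickle

buf = {"act": [0, 1, 2], "stack_num": 2}   # hypothetical stand-in for a buffer object
restored = pickle.loads(pickle.dumps(buf))
assert restored == buf and restored is not buf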
43,234 |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='FrozenLake-v0')
parser.add_argument('--seed', type=int, default=1626)
parser.add_argument('--eps-test', type=float, default=0)
parser.add_argument('--eps-train', type=float, default=0.1)
parser.add_argument('--buffer-size', type=int, default=1)
parser.add_argument('--epoch', type=int, default=100)
parser.add_argument('--step-per-epoch', type=int, default=100)
parser.add_argument('--collect-per-step', type=int, default=100)
parser.add_argument('--repeat-per-collect', type=int, default=1)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--training-num', type=int, default=8)
parser.add_argument('--test-num', type=int, default=1)
parser.add_argument('--logdir', type=str, default='log')
parser.add_argument('--render', type=float, default=0.)
args = parser.parse_known_args()[0]
return args
|
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='FrozenLake-v0')
parser.add_argument('--seed', type=int, default=1626)
parser.add_argument('--eps-test', type=float, default=0)
parser.add_argument('--eps-train', type=float, default=0.1)
parser.add_argument('--buffer-size', type=int, default=1)
parser.add_argument('--epoch', type=int, default=100)
parser.add_argument('--step-per-epoch', type=int, default=100)
parser.add_argument('--collect-per-step', type=int, default=100)
parser.add_argument('--repeat-per-collect', type=int, default=1)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--training-num', type=int, default=8)
parser.add_argument('--test-num', type=int, default=1)
parser.add_argument('--logdir', type=str, default='log')
parser.add_argument('--render', type=float, default=0.)
return parser.parse_args()
|
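The change above is the parse_known_args()[0] versus parse_args() choice: the former tolerates unrecognized flags and returns (namespace, leftovers), the latter exits on them. Sketch:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='FrozenLake-v0')

ns, extra = parser.parse_known_args(['--task', 'Taxi-v3', '--unknown', '1'])
assert ns.task == 'Taxi-v3' and extra == ['--unknown', '1']

ns = parser.parse_args(['--task', 'Taxi-v3'])   # would raise SystemExit on '--unknown'
assert ns.task == 'Taxi-v3'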
59,715 |
def test_grdhisteq_no_outgrid(grid):
"""
    Test grdhisteq with no set outgrid.
"""
temp_grid = grdhisteq(grid=grid)
assert temp_grid.dims == ("lat", "lon")
assert temp_grid.gmt.gtype == 1 # Geographic grid
assert temp_grid.gmt.registration == 1 # Pixel registration
|
def test_grdhisteq_no_outgrid(grid):
"""
    Test grdhisteq with no set outgrid.
"""
temp_grid = grdhisteq(grid=grid)
assert temp_grid.gmt.gtype == 1 # Geographic grid
assert temp_grid.gmt.registration == 1 # Pixel registration
expected_grid = xr.DataArray(
data=[[4.0, 0.0, 8.0, 11.0], [13.0, 4.0, 8.0, 13.0], [15.0, 15.0, 15.0, 15.0]],
coords=dict(lon=[-2.5, -1.5, -0.5, 0.5], lat=[2.5, 3.5, 4.5]),
dims=["lat", "lon"],
)
xrt.assert_allclose(a=temp_grid, b=expected_grid)
|
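Building the expected grid and comparing it with xarray.testing, assuming xarray is importable under the same aliases the test uses (xr, xrt):
import xarray as xr
import xarray.testing as xrt

expected_grid = xr.DataArray(
    data=[[4.0, 0.0], [13.0, 4.0]],
    coords=dict(lon=[-2.5, -1.5], lat=[2.5, 3.5]),
    dims=["lat", "lon"],
)
xrt.assert_allclose(expected_grid, expected_grid + 1e-9)   # differences below atol pass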
38,922 |
def find_validators( # noqa: C901 (ignore complexity)
type_: AnyType, config: Type['BaseConfig']
) -> Generator[AnyCallable, None, None]:
if type_ is Any:
return
type_type = type(type_)
if type_type == ForwardRef or type_type == TypeVar:
return
if type_ is Pattern:
yield from pattern_validators
return
if is_callable_type(type_):
yield callable_validator
return
if is_literal_type(type_):
yield make_literal_validator(type_)
return
supertype = _find_supertype(type_)
if supertype is not None:
type_ = supertype
for val_type, validators in _VALIDATORS:
try:
if issubclass(type_, val_type):
for v in validators:
if isinstance(v, IfConfig):
if v.check(config):
yield v.validator
else:
yield v
return
except TypeError as e:
raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e
if config.arbitrary_types_allowed:
yield make_arbitrary_type_validator(type_)
else:
raise RuntimeError(f'no validator found for {type_}\n see keep_untouched or arbitrary_types_allowed in Config')
|
def find_validators( # noqa: C901 (ignore complexity)
type_: AnyType, config: Type['BaseConfig']
) -> Generator[AnyCallable, None, None]:
if type_ is Any:
return
type_type = type(type_)
if type_type == ForwardRef or type_type == TypeVar:
return
if type_ is Pattern:
yield from pattern_validators
return
if is_callable_type(type_):
yield callable_validator
return
if is_literal_type(type_):
yield make_literal_validator(type_)
return
supertype = _find_supertype(type_)
if supertype is not None:
type_ = supertype
for val_type, validators in _VALIDATORS:
try:
if issubclass(type_, val_type):
for v in validators:
if isinstance(v, IfConfig):
if v.check(config):
yield v.validator
else:
yield v
return
except TypeError as e:
raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e
if config.arbitrary_types_allowed:
yield make_arbitrary_type_validator(type_)
else:
raise RuntimeError(f'no validator found for {type_} see `keep_untouched` or `arbitrary_types_allowed` in Config')
|
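The core of find_validators is an ordered issubclass dispatch table. A self-contained sketch with hypothetical validators (note that order matters, e.g. bool before int):
from typing import Any, Callable, Generator, List, Tuple

def int_validator(v: Any) -> int:
    return int(v)

def str_validator(v: Any) -> str:
    return str(v)

_VALIDATORS: List[Tuple[type, List[Callable]]] = [
    (bool, [str_validator]),   # bool must come before int: issubclass(bool, int) is True
    (int, [int_validator]),
]

def find(type_: type) -> Generator[Callable, None, None]:
    for val_type, validators in _VALIDATORS:
        if issubclass(type_, val_type):
            yield from validators
            return

assert list(find(bool)) == [str_validator]
assert list(find(int)) == [int_validator]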
7,225 |
def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None,
use_quantiles=False):
"""Edge filter an image using the Canny algorithm.
Parameters
-----------
image : 2D array
Grayscale input image to detect edges on; can be of any dtype.
sigma : float, optional
Standard deviation of the Gaussian filter.
low_threshold : float, optional
Lower bound for hysteresis thresholding (linking edges).
If None, low_threshold is set to 10% of dtype's max.
high_threshold : float, optional
Upper bound for hysteresis thresholding (linking edges).
If None, high_threshold is set to 20% of dtype's max.
mask : array, dtype=bool, optional
Mask to limit the application of Canny to a certain area.
use_quantiles : bool, optional
If True then treat low_threshold and high_threshold as quantiles of the
edge magnitude image, rather than absolute edge magnitude values. If True
then the thresholds must be in the range [0, 1].
Returns
-------
output : 2D array (image)
The binary edge map.
See also
--------
skimage.sobel
Notes
-----
The steps of the algorithm are as follows:
* Smooth the image using a Gaussian with ``sigma`` width.
* Apply the horizontal and vertical Sobel operators to get the gradients
within the image. The edge strength is the norm of the gradient.
* Thin potential edges to 1-pixel wide curves. First, find the normal
to the edge at each point. This is done by looking at the
signs and the relative magnitude of the X-Sobel and Y-Sobel
to sort the points into 4 categories: horizontal, vertical,
diagonal and antidiagonal. Then look in the normal and reverse
directions to see if the values in either of those directions are
greater than the point in question. Use interpolation to get a mix of
points instead of picking the one that's the closest to the normal.
* Perform a hysteresis thresholding: first label all points above the
high threshold as edges. Then recursively label any point above the
low threshold that is 8-connected to a labeled point as an edge.
References
-----------
.. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
:DOI:`10.1109/TPAMI.1986.4767851`
.. [2] William Green's Canny tutorial
https://en.wikipedia.org/wiki/Canny_edge_detector
Examples
--------
>>> from skimage import feature
>>> # Generate noisy image of a square
>>> im = np.zeros((256, 256))
>>> im[64:-64, 64:-64] = 1
>>> im += 0.2 * np.random.rand(*im.shape)
>>> # First trial with the Canny filter, with the default smoothing
>>> edges1 = feature.canny(im)
>>> # Increase the smoothing for better results
>>> edges2 = feature.canny(im, sigma=3)
"""
#
# The steps involved:
#
# * Smooth using the Gaussian with sigma above.
#
# * Apply the horizontal and vertical Sobel operators to get the gradients
# within the image. The edge strength is the sum of the magnitudes
# of the gradients in each direction.
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
check_nD(image, 2)
dtype_max = dtype_limits(image, clip_negative=False)[1]
if low_threshold is None:
low_threshold = 0.1
elif use_quantiles:
if not(0.0 <= low_threshold <= 1.0):
raise ValueError("Quantile thresholds must not be > 1.0")
else:
low_threshold = low_threshold / dtype_max
if high_threshold is None:
high_threshold = 0.2
elif use_quantiles:
if not(0.0 <= high_threshold <= 1.0):
raise ValueError("Quantile thresholds must not be > 1.0")
else:
high_threshold = high_threshold / dtype_max
if mask is None:
mask = np.ones(image.shape, dtype=bool)
def fsmooth(x):
return img_as_float(gaussian(x, sigma, mode='constant'))
smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
jsobel = ndi.sobel(smoothed, axis=1)
isobel = ndi.sobel(smoothed, axis=0)
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.hypot(isobel, jsobel)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
eroded_mask = binary_erosion(mask, s, border_value=0)
eroded_mask = eroded_mask & (magnitude > 0)
#
#--------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(image.shape, bool)
#----- 0 to 45 degrees ------
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#
#---- If use_quantiles is set then calculate the thresholds to use
#
if use_quantiles:
if high_threshold > 1.0 or low_threshold > 1.0:
raise ValueError("Quantile thresholds must not be > 1.0")
if high_threshold < 0.0 or low_threshold < 0.0:
raise ValueError("Quantile thresholds must not be < 0.0")
high_threshold = np.percentile(magnitude, 100.0 * high_threshold)
low_threshold = np.percentile(magnitude, 100.0 * low_threshold)
#
#---- Create two masks at the two thresholds.
#
high_mask = local_maxima & (magnitude >= high_threshold)
low_mask = local_maxima & (magnitude >= low_threshold)
#
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
#
strel = np.ones((3, 3), bool)
labels, count = label(low_mask, strel)
if count == 0:
return low_mask
sums = (np.array(ndi.sum(high_mask, labels,
np.arange(count, dtype=np.int32) + 1),
copy=False, ndmin=1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask
|
def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None,
use_quantiles=False):
"""Edge filter an image using the Canny algorithm.
Parameters
-----------
image : 2D array
Grayscale input image to detect edges on; can be of any dtype.
sigma : float, optional
Standard deviation of the Gaussian filter.
low_threshold : float, optional
Lower bound for hysteresis thresholding (linking edges).
If None, low_threshold is set to 10% of dtype's max.
high_threshold : float, optional
Upper bound for hysteresis thresholding (linking edges).
If None, high_threshold is set to 20% of dtype's max.
mask : array, dtype=bool, optional
Mask to limit the application of Canny to a certain area.
use_quantiles : bool, optional
If True then treat low_threshold and high_threshold as quantiles of the
edge magnitude image, rather than absolute edge magnitude values. If True
then the thresholds must be in the range [0, 1].
Returns
-------
output : 2D array (image)
The binary edge map.
See also
--------
skimage.sobel
Notes
-----
The steps of the algorithm are as follows:
* Smooth the image using a Gaussian with ``sigma`` width.
* Apply the horizontal and vertical Sobel operators to get the gradients
within the image. The edge strength is the norm of the gradient.
* Thin potential edges to 1-pixel wide curves. First, find the normal
to the edge at each point. This is done by looking at the
signs and the relative magnitude of the X-Sobel and Y-Sobel
to sort the points into 4 categories: horizontal, vertical,
diagonal and antidiagonal. Then look in the normal and reverse
directions to see if the values in either of those directions are
greater than the point in question. Use interpolation to get a mix of
points instead of picking the one that's the closest to the normal.
* Perform a hysteresis thresholding: first label all points above the
high threshold as edges. Then recursively label any point above the
low threshold that is 8-connected to a labeled point as an edge.
References
-----------
.. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
:DOI:`10.1109/TPAMI.1986.4767851`
.. [2] William Green's Canny tutorial
https://en.wikipedia.org/wiki/Canny_edge_detector
Examples
--------
>>> from skimage import feature
>>> # Generate noisy image of a square
>>> im = np.zeros((256, 256))
>>> im[64:-64, 64:-64] = 1
>>> im += 0.2 * np.random.rand(*im.shape)
>>> # First trial with the Canny filter, with the default smoothing
>>> edges1 = feature.canny(im)
>>> # Increase the smoothing for better results
>>> edges2 = feature.canny(im, sigma=3)
"""
#
# The steps involved:
#
# * Smooth using the Gaussian with sigma above.
#
# * Apply the horizontal and vertical Sobel operators to get the gradients
# within the image. The edge strength is the sum of the magnitudes
# of the gradients in each direction.
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
check_nD(image, 2)
dtype_max = dtype_limits(image, clip_negative=False)[1]
if low_threshold is None:
low_threshold = 0.1
elif use_quantiles:
if not(0.0 <= low_threshold <= 1.0):
raise ValueError("Quantile thresholds must be between 0 and 1.")
else:
low_threshold = low_threshold / dtype_max
if high_threshold is None:
high_threshold = 0.2
elif use_quantiles:
if not(0.0 <= high_threshold <= 1.0):
raise ValueError("Quantile thresholds must not be > 1.0")
else:
high_threshold = high_threshold / dtype_max
if mask is None:
mask = np.ones(image.shape, dtype=bool)
def fsmooth(x):
return img_as_float(gaussian(x, sigma, mode='constant'))
smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
jsobel = ndi.sobel(smoothed, axis=1)
isobel = ndi.sobel(smoothed, axis=0)
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.hypot(isobel, jsobel)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
eroded_mask = binary_erosion(mask, s, border_value=0)
eroded_mask = eroded_mask & (magnitude > 0)
#
#--------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(image.shape, bool)
#----- 0 to 45 degrees ------
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#
#---- If use_quantiles is set then calculate the thresholds to use
#
if use_quantiles:
if high_threshold > 1.0 or low_threshold > 1.0:
raise ValueError("Quantile thresholds must not be > 1.0")
if high_threshold < 0.0 or low_threshold < 0.0:
raise ValueError("Quantile thresholds must not be < 0.0")
high_threshold = np.percentile(magnitude, 100.0 * high_threshold)
low_threshold = np.percentile(magnitude, 100.0 * low_threshold)
#
#---- Create two masks at the two thresholds.
#
high_mask = local_maxima & (magnitude >= high_threshold)
low_mask = local_maxima & (magnitude >= low_threshold)
#
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
#
strel = np.ones((3, 3), bool)
labels, count = label(low_mask, strel)
if count == 0:
return low_mask
sums = (np.array(ndi.sum(high_mask, labels,
np.arange(count, dtype=np.int32) + 1),
copy=False, ndmin=1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask
|
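The hysteresis step at the end can be looked at in isolation: label the low-threshold mask and keep only the 8-connected components that contain at least one high-threshold pixel (sketch, assuming SciPy is available):
import numpy as np
from scipy import ndimage as ndi

low_mask = np.array([[1, 1, 0, 1],
                     [0, 1, 0, 1],
                     [0, 0, 0, 1]], dtype=bool)
high_mask = np.zeros_like(low_mask)
high_mask[0, 1] = True                              # only the left component is "strong"

strel = np.ones((3, 3), bool)                       # 8-connectivity
labels, count = ndi.label(low_mask, strel)
sums = np.atleast_1d(ndi.sum(high_mask, labels, np.arange(count) + 1))
good_label = np.zeros(count + 1, bool)
good_label[1:] = sums > 0
edges = good_label[labels]
assert edges[0, 0] and not edges[0, 3]              # right component is dropped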
3,119 |
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import Float64Dtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
elif how in ["mean", "median", "var"] and isinstance(dtype, Int64Dtype):
return Float64Dtype()
return dtype
|
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import Float64Dtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
elif how in ["mean", "median", "var"] and isinstance(dtype, (Int64Dtype, BooleanDtype)):
return Float64Dtype()
return dtype
|
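Assuming the function above is in scope and a pandas version with nullable Float64 (1.2+), the mapping reads as follows: summing booleans widens to int64, and averaging nullable ints (and, after the change, nullable booleans) widens to Float64.
import numpy as np
import pandas as pd

assert maybe_cast_result_dtype(np.dtype(bool), "sum") == np.dtype(np.int64)
assert maybe_cast_result_dtype(pd.BooleanDtype(), "sum") == pd.Int64Dtype()
assert maybe_cast_result_dtype(pd.Int64Dtype(), "mean") == pd.Float64Dtype()
assert maybe_cast_result_dtype(pd.Int64Dtype(), "min") == pd.Int64Dtype()   # unchanged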
2,890 |
def type_of_target(y, input_name=""):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = (
isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__")
) and not isinstance(y, str)
if not valid:
raise ValueError(
"Expected array-like (array or non-string sequence), got %r" % y
)
sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"]
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return "multilabel-indicator"
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
with warnings.catch_warnings():
warnings.simplefilter("error", np.VisibleDeprecationWarning)
try:
y = np.asarray(y)
except (np.VisibleDeprecationWarning, ValueError):
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = np.asarray(y, dtype=object)
# The old sequence of sequences format
try:
if (
not hasattr(y[0], "__array__")
and isinstance(y[0], Sequence)
and not isinstance(y[0], str)
):
raise ValueError(
"You appear to be using a legacy multi-label data"
" representation. Sequence of sequences are no"
" longer supported; use a binary array or sparse"
" matrix instead - the MultiLabelBinarizer"
" transformer can convert to this format."
)
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and not isinstance(y.flat[0], str)):
# [[[1, 2]]] or [obj_1] and not ["label_1"]
return (
"Unkown label type. Use an object array containing strings or encode the"
" values in a contiguous numerical array."
)
if y.ndim == 2 and y.shape[1] == 0:
return "unknown" # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == "f" and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_assert_all_finite(y, input_name=input_name)
return "continuous" + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return "multiclass" + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return "binary" # [1, 2] or [["a"], ["b"]]
|
def type_of_target(y, input_name=""):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = (
isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__")
) and not isinstance(y, str)
if not valid:
raise ValueError(
"Expected array-like (array or non-string sequence), got %r" % y
)
sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"]
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return "multilabel-indicator"
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
with warnings.catch_warnings():
warnings.simplefilter("error", np.VisibleDeprecationWarning)
try:
y = np.asarray(y)
except (np.VisibleDeprecationWarning, ValueError):
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = np.asarray(y, dtype=object)
# The old sequence of sequences format
try:
if (
not hasattr(y[0], "__array__")
and isinstance(y[0], Sequence)
and not isinstance(y[0], str)
):
raise ValueError(
"You appear to be using a legacy multi-label data"
" representation. Sequence of sequences are no"
" longer supported; use a binary array or sparse"
" matrix instead - the MultiLabelBinarizer"
" transformer can convert to this format."
)
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and not isinstance(y.flat[0], str)):
# [[[1, 2]]] or [obj_1] and not ["label_1"]
return (
"Unknown label type. Use an object array containing strings or encode the"
" values in a contiguous numerical array."
)
if y.ndim == 2 and y.shape[1] == 0:
return "unknown" # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == "f" and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_assert_all_finite(y, input_name=input_name)
return "continuous" + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return "multiclass" + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return "binary" # [1, 2] or [["a"], ["b"]]
|
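The continuous-versus-discrete decision above reduces to one vectorized check: a float dtype whose values are not all integer-valued. Sketch:
import numpy as np

def looks_continuous(y):
    y = np.asarray(y)
    return y.dtype.kind == "f" and bool(np.any(y != y.astype(int)))

assert looks_continuous([0.1, 0.6])        # -> 'continuous'
assert not looks_continuous([1.0, 2.0])    # integer-valued floats -> 'binary'
assert not looks_continuous([1, 0, 2])     # integer dtype -> 'multiclass'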
8,968 |
def trim_docstring(doc):
"""Get the docstring as a series of lines that can be sent.
:param str doc: a callable's docstring to trim
:return: a list of trimmed lines
:rtype: list
This function acts like :func:`inspect.cleandoc` but doesn't replace tabs,
and instead of a :class:`str`, it returns a :class:`list`.
"""
if not doc:
return []
lines = doc.expandtabs().splitlines()
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[:].rstrip())
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
return trimmed
|
def trim_docstring(doc):
"""Get the docstring as a series of lines that can be sent.
:param str doc: a callable's docstring to trim
:return: a list of trimmed lines
:rtype: list
This function acts like :func:`inspect.cleandoc` but doesn't replace tabs,
and instead of a :class:`str` it returns a :class:`list`.
"""
if not doc:
return []
lines = doc.expandtabs().splitlines()
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[:].rstrip())
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
return trimmed
|
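The common-indent computation in trim_docstring, shown on a small sample docstring (inspect.cleandoc in the standard library performs a similar job when a plain string is acceptable):
import sys

doc = """Summary line.

        Indented body line one.
        Indented body line two.
"""
lines = doc.expandtabs().splitlines()
indent = min((len(line) - len(line.lstrip()) for line in lines[1:] if line.strip()),
             default=sys.maxsize)
assert indent == 8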
27,441 |
def download_IERS_A(show_progress=True):
"""
Download and cache the IERS Bulletin A table.
If one is already cached, download a new one and overwrite the old. Store
table in the astropy cache, and undo the monkey patching done by
`~astroplan.get_IERS_A_or_workaround`.
Parameters
----------
show_progress : bool
`True` shows a progress bar during the download.
"""
urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if IERS_A_in_cache():
for url in urls:
clear_download_cache(url)
for i, url in enumerate(urls):
try:
local_iers_a_path = download_file(iers.IERS_A_URL, cache=True,
show_progress=show_progress)
except urllib.error.URLError:
if i == len(urls) - 1:
raise
# Undo monkey patch set up by get_IERS_A_or_workaround
iers.IERS.iers_table = iers.IERS_A.open(local_iers_a_path)
Time._get_delta_ut1_utc = BACKUP_Time_get_delta_ut1_utc
|
def download_IERS_A(show_progress=True):
"""
Download and cache the IERS Bulletin A table.
If one is already cached, download a new one and overwrite the old. Store
table in the astropy cache, and undo the monkey patching done by
`~astroplan.get_IERS_A_or_workaround`.
Parameters
----------
show_progress : bool
`True` shows a progress bar during the download.
"""
urls = (iers.IERS_A_URL, iers.IERS_A_URL_MIRROR)
if IERS_A_in_cache():
for url in urls:
clear_download_cache(url)
for i, url in enumerate(urls):
try:
local_iers_a_path = download_file(iers.IERS_A_URL, cache=True,
show_progress=show_progress)
except urllib.error.URLError:
if i == len(urls) - 1:
raise
# Undo monkey patch set up by get_IERS_A_or_workaround
iers.IERS.iers_table = iers.IERS_A.open(local_iers_a_path)
Time._get_delta_ut1_utc = BACKUP_Time_get_delta_ut1_utc
|
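The mirror-fallback loop in isolation: try each URL in order and re-raise only when the last one also fails (sketch with a hypothetical fetch callable):
import urllib.error

def fetch_with_fallback(urls, fetch):
    """Try each URL in order; re-raise only if the last one also fails."""
    for i, url in enumerate(urls):
        try:
            return fetch(url)
        except urllib.error.URLError:
            if i == len(urls) - 1:
                raise

calls = []
def fake_fetch(url):
    calls.append(url)
    if url == "primary":
        raise urllib.error.URLError("down")
    return "ok"

assert fetch_with_fallback(("primary", "mirror"), fake_fetch) == "ok"
assert calls == ["primary", "mirror"]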
30,402 |
def users_to_entry(title, response, nex_page_token=None):
context = []
for user_data in response:
username = user_data.get('name').get('givenName') if user_data.get('name') \
and 'givenName' in user_data.get('name') else None
display = user_data.get('name').get('fullName') if user_data.get('name') \
and 'fullName' in user_data.get('name') else None
context.append({
'Type': 'Google',
'ID': user_data.get('id'),
'UserName': username,
'Username': username, # adding to fit the new context standard
'DisplayName': display,
'Email': {'Address': user_data.get('primaryEmail')},
'Gmail': {'Address': user_data.get('primaryEmail')},
'Group': user_data.get('kind'),
'Groups': user_data.get('kind'), # adding to fit the new context standard
'CustomerId': user_data.get('customerId'),
'Domain': user_data.get('primaryEmail').split('@')[1],
'VisibleInDirectory': user_data.get('includeInGlobalAddressList'),
})
headers = ['Type', 'ID', 'Username',
'DisplayName', 'Groups', 'CustomerId', 'Domain', 'OrganizationUnit', 'Email', 'VisibleInDirectory',
'nextPageToken']
human_readable = tableToMarkdown(title, context, headers, removeNull=True)
if nex_page_token:
human_readable += "\nTo get further results, rerun the command with this page-token:\n" + nex_page_token
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': response,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': {'Account(val.ID && val.Type && val.ID == obj.ID && val.Type == obj.Type)': context}
}
|
def users_to_entry(title, response, nex_page_token=None):
context = []
for user_data in response:
username = user_data.get('name').get('givenName') if user_data.get('name') \
and 'givenName' in user_data.get('name') else None
display = user_data.get('name').get('fullName') if user_data.get('name') \
and 'fullName' in user_data.get('name') else None
context.append({
'Type': 'Google',
'ID': user_data.get('id'),
'UserName': username,
'Username': username, # adding to fit the new context standard
'DisplayName': display,
'Email': {'Address': user_data.get('primaryEmail')},
'Gmail': {'Address': user_data.get('primaryEmail')},
'Group': user_data.get('kind'),
'Groups': user_data.get('kind'), # adding to fit the new context standard
'CustomerId': user_data.get('customerId'),
'Domain': user_data.get('primaryEmail').split('@')[1],
'VisibleInDirectory': user_data.get('includeInGlobalAddressList'),
})
headers = ['Type', 'ID', 'Username',
'DisplayName', 'Groups', 'CustomerId', 'Domain', 'OrganizationUnit', 'Email', 'VisibleInDirectory',
'nextPageToken']
human_readable = tableToMarkdown(title, context, headers, removeNull=True)
if nex_page_token:
human_readable += "\nTo get further results, rerun the command with this page-token:\n" + next_page_token
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': response,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': {'Account(val.ID && val.Type && val.ID == obj.ID && val.Type == obj.Type)': context}
}
|
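The nested .get() guards for the optional 'name' field can be written more compactly by falling back to an empty dict; a small defensive sketch (the empty-string default for primaryEmail is an addition here, not in the original):
user_data = {"id": "42", "primaryEmail": "a@example.com"}   # no 'name' key at all

name = user_data.get('name') or {}
username = name.get('givenName')
display = name.get('fullName')
domain = user_data.get('primaryEmail', '').split('@')[-1]

assert username is None and display is None and domain == 'example.com'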
53,849 |
def normalize_disk_info(image_data_disks=None,
data_disk_sizes_gb=None, attach_data_disks=None, storage_sku=None,
os_disk_caching=None, data_disk_cachings=None, size='',
ephemeral_os_disk=False, ephemeral_os_disk_placement=None,
data_disk_delete_option=None):
from msrestazure.tools import is_valid_resource_id
from ._validators import validate_delete_options
is_lv_size = re.search('_L[0-9]+s', size, re.I)
# we should return a dictionary with info like below
# {
# 'os': { caching: 'Read', write_accelerator: None},
# 0: { caching: 'None', write_accelerator: True},
# 1: { caching: 'None', write_accelerator: True},
# }
info = {}
used_luns = set()
attach_data_disks = attach_data_disks or []
data_disk_sizes_gb = data_disk_sizes_gb or []
image_data_disks = image_data_disks or []
if attach_data_disks and data_disk_delete_option:
data_disk_delete_option = validate_delete_options(attach_data_disks, data_disk_delete_option)
info['os'] = {}
# update os diff disk settings
if ephemeral_os_disk:
info['os']['diffDiskSettings'] = {'option': 'Local'}
# local os disks require readonly caching, default to ReadOnly if os_disk_caching not specified.
if not os_disk_caching:
os_disk_caching = 'ReadOnly'
if ephemeral_os_disk_placement:
info['os']['diffDiskSettings']['placement'] = ephemeral_os_disk_placement
# add managed image data disks
for data_disk in image_data_disks:
i = data_disk['lun']
info[i] = {
'lun': i,
'managedDisk': {'storageAccountType': None},
'createOption': 'fromImage'
}
used_luns.add(i)
# add empty data disks, do not use existing luns
i = 0
sizes_copy = list(data_disk_sizes_gb)
while sizes_copy:
# get free lun
while i in used_luns:
i += 1
used_luns.add(i)
info[i] = {
'lun': i,
'managedDisk': {'storageAccountType': None},
'createOption': 'empty',
'diskSizeGB': sizes_copy.pop(0),
'deleteOption': data_disk_delete_option if isinstance(data_disk_delete_option, str) else None
}
# update storage skus for managed data disks
if storage_sku is not None:
update_disk_sku_info(info, storage_sku)
# check that os storage account type is not UltraSSD_LRS
if info['os'].get('storageAccountType', "").lower() == 'ultrassd_lrs':
logger.warning("Managed os disk storage account sku cannot be UltraSSD_LRS. Using service default.")
info['os']['storageAccountType'] = None
# add attached data disks
i = 0
attach_data_disks_copy = list(attach_data_disks)
while attach_data_disks_copy:
# get free lun
while i in used_luns:
i += 1
used_luns.add(i)
# use free lun
info[i] = {
'lun': i,
'createOption': 'attach'
}
d = attach_data_disks_copy.pop(0)
if is_valid_resource_id(d):
info[i]['managedDisk'] = {'id': d}
if data_disk_delete_option:
info[i]['deleteOption'] = data_disk_delete_option if isinstance(data_disk_delete_option, str) \
else data_disk_delete_option.get(info[i]['name'], None)
else:
info[i]['vhd'] = {'uri': d}
info[i]['name'] = d.split('/')[-1].split('.')[0]
if data_disk_delete_option:
info[i]['deleteOption'] = data_disk_delete_option if isinstance(data_disk_delete_option, str) \
else data_disk_delete_option.get(info[i]['name'], None)
# fill in data disk caching
if data_disk_cachings:
update_disk_caching(info, data_disk_cachings)
# default os disk caching to 'ReadWrite' unless set otherwise
if os_disk_caching:
info['os']['caching'] = os_disk_caching
else:
info['os']['caching'] = 'None' if is_lv_size else 'ReadWrite'
# error out on invalid vm sizes
if is_lv_size:
for v in info.values():
if v.get('caching', 'None').lower() != 'none':
raise CLIError('usage error: for Lv series of machines, "None" is the only supported caching mode')
result_info = {'os': info['os']}
# in python 3 insertion order matters during iteration. This ensures that luns are retrieved in numerical order
for key in sorted([key for key in info if key != 'os']):
result_info[key] = info[key]
return result_info
|
def normalize_disk_info(image_data_disks=None,
data_disk_sizes_gb=None, attach_data_disks=None, storage_sku=None,
os_disk_caching=None, data_disk_cachings=None, size='',
ephemeral_os_disk=False, ephemeral_os_disk_placement=None,
data_disk_delete_option=None):
from msrestazure.tools import is_valid_resource_id
from ._validators import validate_delete_options
is_lv_size = re.search('_L[0-9]+s', size, re.I)
# we should return a dictionary with info like below
# {
# 'os': { caching: 'Read', write_accelerator: None},
# 0: { caching: 'None', write_accelerator: True},
# 1: { caching: 'None', write_accelerator: True},
# }
info = {}
used_luns = set()
attach_data_disks = attach_data_disks or []
data_disk_sizes_gb = data_disk_sizes_gb or []
image_data_disks = image_data_disks or []
if data_disk_delete_option:
if attach_data_disks:
data_disk_delete_option = validate_delete_options(attach_data_disks, data_disk_delete_option)
else:
data_disk_delete_option = _validate_vm_data_disk_delete_option(attach_data_disks, data_disk_delete_option)
info['os'] = {}
# update os diff disk settings
if ephemeral_os_disk:
info['os']['diffDiskSettings'] = {'option': 'Local'}
# local os disks require readonly caching, default to ReadOnly if os_disk_caching not specified.
if not os_disk_caching:
os_disk_caching = 'ReadOnly'
if ephemeral_os_disk_placement:
info['os']['diffDiskSettings']['placement'] = ephemeral_os_disk_placement
# add managed image data disks
for data_disk in image_data_disks:
i = data_disk['lun']
info[i] = {
'lun': i,
'managedDisk': {'storageAccountType': None},
'createOption': 'fromImage'
}
used_luns.add(i)
# add empty data disks, do not use existing luns
i = 0
sizes_copy = list(data_disk_sizes_gb)
while sizes_copy:
# get free lun
while i in used_luns:
i += 1
used_luns.add(i)
info[i] = {
'lun': i,
'managedDisk': {'storageAccountType': None},
'createOption': 'empty',
'diskSizeGB': sizes_copy.pop(0),
'deleteOption': data_disk_delete_option if isinstance(data_disk_delete_option, str) else None
}
# update storage skus for managed data disks
if storage_sku is not None:
update_disk_sku_info(info, storage_sku)
# check that os storage account type is not UltraSSD_LRS
if info['os'].get('storageAccountType', "").lower() == 'ultrassd_lrs':
logger.warning("Managed os disk storage account sku cannot be UltraSSD_LRS. Using service default.")
info['os']['storageAccountType'] = None
# add attached data disks
i = 0
attach_data_disks_copy = list(attach_data_disks)
while attach_data_disks_copy:
# get free lun
while i in used_luns:
i += 1
used_luns.add(i)
# use free lun
info[i] = {
'lun': i,
'createOption': 'attach'
}
d = attach_data_disks_copy.pop(0)
if is_valid_resource_id(d):
info[i]['managedDisk'] = {'id': d}
if data_disk_delete_option:
info[i]['deleteOption'] = data_disk_delete_option if isinstance(data_disk_delete_option, str) \
else data_disk_delete_option.get(info[i]['name'], None)
else:
info[i]['vhd'] = {'uri': d}
info[i]['name'] = d.split('/')[-1].split('.')[0]
if data_disk_delete_option:
info[i]['deleteOption'] = data_disk_delete_option if isinstance(data_disk_delete_option, str) \
else data_disk_delete_option.get(info[i]['name'], None)
# fill in data disk caching
if data_disk_cachings:
update_disk_caching(info, data_disk_cachings)
# default os disk caching to 'ReadWrite' unless set otherwise
if os_disk_caching:
info['os']['caching'] = os_disk_caching
else:
info['os']['caching'] = 'None' if is_lv_size else 'ReadWrite'
# error out on invalid vm sizes
if is_lv_size:
for v in info.values():
if v.get('caching', 'None').lower() != 'none':
raise CLIError('usage error: for Lv series of machines, "None" is the only supported caching mode')
result_info = {'os': info['os']}
# in python 3 insertion order matters during iteration. This ensures that luns are retrieved in numerical order
for key in sorted([key for key in info if key != 'os']):
result_info[key] = info[key]
return result_info
|
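Both versions of normalize_disk_info above allocate LUNs for new disks by skipping numbers already taken by image data disks. A standalone sketch of that allocation rule (not the Azure CLI code itself) follows; the helper name allocate_luns is made up for illustration.

def allocate_luns(used_luns, n_new_disks):
    # New disks take the lowest LUNs not already occupied, mirroring the
    # "get free lun" loops in normalize_disk_info.
    used = set(used_luns)
    allocated = []
    candidate = 0
    for _ in range(n_new_disks):
        while candidate in used:
            candidate += 1
        used.add(candidate)
        allocated.append(candidate)
    return allocated

# Image disks already occupy LUNs 0 and 2, so three new disks land on 1, 3, 4.
assert allocate_luns({0, 2}, 3) == [1, 3, 4]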
17,515 |
def _infer_meta_data(ds, x, y, hue, hue_style, add_guide, funcname):
dvars = set(ds.variables.keys())
error_msg = f" must be one of ({', '.join(dvars)})"
if x not in dvars:
raise ValueError(f"x {error_msg}, got {x}")
if y not in dvars:
raise ValueError(f"y {error_msg}, got {y}")
if hue is not None and hue not in dvars:
raise ValueError(f"hue {error_msg}, got {hue}")
if hue:
hue_is_numeric = _is_numeric(ds[hue].values)
if hue_style is None:
hue_style = "continuous" if hue_is_numeric else "discrete"
if not hue_is_numeric and (hue_style == "continuous"):
raise ValueError(
f"Cannot create a colorbar for a non numeric coordinate: {hue}"
)
if add_guide is None or add_guide is True:
add_colorbar = True if hue_style == "continuous" else False
add_legend = True if hue_style == "discrete" else False
else:
add_colorbar = False
add_legend = False
else:
if add_guide is True and funcname not in ("quiver", "streamplot"):
raise ValueError("Cannot set add_guide when hue is None.")
add_legend = False
add_colorbar = False
if (add_guide or add_guide is None) and funcname == "quiver":
add_quiverkey = True
if hue:
add_colorbar = True
if not hue_style:
hue_style = "continuous"
elif hue_style != "continuous":
raise ValueError(
"hue_style must be 'continuous' or None for .plot.quiver or "
".plot.streamplot"
)
else:
add_quiverkey = False
if (add_guide or add_guide is None) and funcname == "streamplot":
if hue:
add_colorbar = True
if not hue_style:
hue_style = "continuous"
elif hue_style != "continuous":
raise ValueError(
"hue_style must be 'continuous' or None for .plot.quiver or "
".plot.streamplot"
)
if hue_style is not None and hue_style not in ["discrete", "continuous"]:
raise ValueError("hue_style must be either None, 'discrete' or 'continuous'.")
if hue:
hue_label = label_from_attrs(ds[hue])
hue = ds[hue]
else:
hue_label = None
hue = None
return {
"add_colorbar": add_colorbar,
"add_legend": add_legend,
"add_quiverkey": add_quiverkey,
"hue_label": hue_label,
"hue_style": hue_style,
"xlabel": label_from_attrs(ds[x]),
"ylabel": label_from_attrs(ds[y]),
"hue": hue,
}
|
def _infer_meta_data(ds, x, y, hue, hue_style, add_guide, funcname):
dvars = set(ds.variables.keys())
error_msg = f" must be one of ({', '.join(dvars)})"
if x not in dvars:
raise ValueError(f"Expected 'x' {error_msg}. Received {x} instead.")
if y not in dvars:
raise ValueError(f"y {error_msg}, got {y}")
if hue is not None and hue not in dvars:
raise ValueError(f"hue {error_msg}, got {hue}")
if hue:
hue_is_numeric = _is_numeric(ds[hue].values)
if hue_style is None:
hue_style = "continuous" if hue_is_numeric else "discrete"
if not hue_is_numeric and (hue_style == "continuous"):
raise ValueError(
f"Cannot create a colorbar for a non numeric coordinate: {hue}"
)
if add_guide is None or add_guide is True:
add_colorbar = True if hue_style == "continuous" else False
add_legend = True if hue_style == "discrete" else False
else:
add_colorbar = False
add_legend = False
else:
if add_guide is True and funcname not in ("quiver", "streamplot"):
raise ValueError("Cannot set add_guide when hue is None.")
add_legend = False
add_colorbar = False
if (add_guide or add_guide is None) and funcname == "quiver":
add_quiverkey = True
if hue:
add_colorbar = True
if not hue_style:
hue_style = "continuous"
elif hue_style != "continuous":
raise ValueError(
"hue_style must be 'continuous' or None for .plot.quiver or "
".plot.streamplot"
)
else:
add_quiverkey = False
if (add_guide or add_guide is None) and funcname == "streamplot":
if hue:
add_colorbar = True
if not hue_style:
hue_style = "continuous"
elif hue_style != "continuous":
raise ValueError(
"hue_style must be 'continuous' or None for .plot.quiver or "
".plot.streamplot"
)
if hue_style is not None and hue_style not in ["discrete", "continuous"]:
raise ValueError("hue_style must be either None, 'discrete' or 'continuous'.")
if hue:
hue_label = label_from_attrs(ds[hue])
hue = ds[hue]
else:
hue_label = None
hue = None
return {
"add_colorbar": add_colorbar,
"add_legend": add_legend,
"add_quiverkey": add_quiverkey,
"hue_label": hue_label,
"hue_style": hue_style,
"xlabel": label_from_attrs(ds[x]),
"ylabel": label_from_attrs(ds[y]),
"hue": hue,
}
|
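The _infer_meta_data variants above decide between a colorbar and a legend from the dtype of the hue variable: a numeric hue defaults to the 'continuous' style (colorbar), a non-numeric hue to 'discrete' (legend). A toy sketch of that rule, with a plain NumPy dtype check standing in for xarray's _is_numeric helper:

import numpy as np

def infer_hue_style(hue_values, hue_style=None):
    is_numeric = np.issubdtype(np.asarray(hue_values).dtype, np.number)
    if hue_style is None:
        hue_style = "continuous" if is_numeric else "discrete"
    if hue_style == "continuous" and not is_numeric:
        raise ValueError("Cannot create a colorbar for a non numeric coordinate")
    add_colorbar = hue_style == "continuous"
    add_legend = hue_style == "discrete"
    return hue_style, add_colorbar, add_legend

print(infer_hue_style([1.0, 2.0, 3.0]))   # ('continuous', True, False)
print(infer_hue_style(["a", "b", "c"]))   # ('discrete', False, True)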
48,572 |
def merge(
datasets,
merge_points=True,
main_has_priority=True,
progress_bar=False
):
"""Merge several datasets.
.. note::
The behavior of this filter varies from the
:func:`PolyDataFilters.boolean_union` filter. This filter
does not attempt to create a manifold mesh and will include
internal surfaces when two meshes overlap.
datasets : sequence of :class:`pyvista.Dataset`
Sequence of datasets. Can be of any :class:`pyvista.Dataset`
merge_points : bool, optional
Merge equivalent points when ``True``. Defaults to ``True``.
main_has_priority : bool, optional
When this parameter is ``True`` and ``merge_points=True``,
the arrays of the merging grids will be overwritten
by the original main mesh.
main_has_priority : bool, optional
When this parameter is ``True`` and ``merge_points=True``,
the arrays of the merging grids will be overwritten
by the original main mesh.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
pyvista.DataSet
:class:`pyvista.PolyData` if all items in datasets are
:class:`pyvista.PolyData`, otherwise returns a
:class:`pyvista.UnstructuredGrid`.
Examples
--------
Merge two polydata datasets.
>>> import pyvista
>>> sphere = pyvista.Sphere(center=(0, 0, 1))
>>> cube = pyvista.Cube()
>>> mesh = pyvista.merge([cube, sphere])
>>> mesh.plot()
"""
if not isinstance(datasets, collections.Sequence):
raise TypeError(f"Expected a sequence, got {type(datasets)}")
if len(datasets) < 1:
raise ValueError("Expected at least one dataset.")
first = datasets[0]
if not isinstance(first, pyvista.DataSet):
raise TypeError(f"Expected pyvista.DataSet, not {type(first)}")
return datasets[0].merge(
datasets[1:],
merge_points=merge_points,
main_has_priority=main_has_priority,
progress_bar=progress_bar,
)
|
def merge(
datasets,
merge_points=True,
main_has_priority=True,
progress_bar=False
):
"""Merge several datasets.
.. note::
The behavior of this filter varies from the
:func:`PolyDataFilters.boolean_union` filter. This filter
does not attempt to create a manifold mesh and will include
internal surfaces when two meshes overlap.
datasets : sequence of :class:`pyvista.Dataset`
Sequence of datasets. Can be of any :class:`pyvista.Dataset`
merge_points : bool, optional
Merge equivalent points when ``True``. Defaults to ``True``.
main_has_priority : bool, optional
When this parameter is ``True`` and ``merge_points=True``,
the arrays of the merging grids will be overwritten
by the original main mesh.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
pyvista.DataSet
:class:`pyvista.PolyData` if all items in datasets are
:class:`pyvista.PolyData`, otherwise returns a
:class:`pyvista.UnstructuredGrid`.
Examples
--------
Merge two polydata datasets.
>>> import pyvista
>>> sphere = pyvista.Sphere(center=(0, 0, 1))
>>> cube = pyvista.Cube()
>>> mesh = pyvista.merge([cube, sphere])
>>> mesh.plot()
"""
if not isinstance(datasets, collections.Sequence):
raise TypeError(f"Expected a sequence, got {type(datasets)}")
if len(datasets) < 1:
raise ValueError("Expected at least one dataset.")
first = datasets[0]
if not isinstance(first, pyvista.DataSet):
raise TypeError(f"Expected pyvista.DataSet, not {type(first)}")
return datasets[0].merge(
datasets[1:],
merge_points=merge_points,
main_has_priority=main_has_priority,
progress_bar=progress_bar,
)
|
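The body of merge() performs three argument checks before delegating to DataSet.merge. A generic, pyvista-free sketch of that validation pattern is given below, using collections.abc.Sequence (the non-deprecated home of Sequence); validate_datasets and expected_type are illustrative names, not part of pyvista.

from collections.abc import Sequence

def validate_datasets(datasets, expected_type):
    # Mirror the three checks in merge(): sequence, non-empty, element type.
    if not isinstance(datasets, Sequence):
        raise TypeError(f"Expected a sequence, got {type(datasets)}")
    if len(datasets) < 1:
        raise ValueError("Expected at least one dataset.")
    if not isinstance(datasets[0], expected_type):
        raise TypeError(f"Expected {expected_type}, not {type(datasets[0])}")

validate_datasets([1, 2, 3], int)  # passes; a generator or an empty list would raise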
56,343 |
def distance_matrix(stream_list, shift_len=0.0,
allow_individual_trace_shifts=True, cores=1):
"""
Compute distance matrix for waveforms based on cross-correlations.
Function to compute the distance matrix for all templates - will give
distance as 1-abs(cccoh), e.g. a well correlated pair of templates will
have small distances, and an equally well correlated reverse image will
have the same distance as a positively correlated image - this is an issue.
:type stream_list: list
:param stream_list:
List of the :class:`obspy.core.stream.Stream` to compute the distance
matrix for
:type shift_len: float
:param shift_len: How many seconds for templates to shift
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type cores: int
:param cores: Number of cores to parallel process using, defaults to 1.
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
.. warning::
Because distance is given as :math:`1-abs(coherence)`, negatively
correlated and positively correlated objects are given the same
distance.
.. note::
Requires all traces to have the same sampling rate and same length.
"""
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(stream_list))] *
len(stream_list))
shift_mat = np.zeros_like(dist_mat)
shift_mat = np.zeros([len(stream_list),
len(stream_list),
max([len(st) for st in stream_list])])
n_shifts_per_stream = 1
for i, master in enumerate(stream_list):
dist_list, shift_list = cross_chan_correlation(
st1=master, streams=stream_list, shift_len=shift_len,
allow_individual_trace_shifts=allow_individual_trace_shifts,
xcorr_func='fftw', cores=cores)
dist_mat[i] = 1 - dist_list
if allow_individual_trace_shifts:
n_shifts_per_stream = shift_list.shape[1]
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
else:
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
if shift_len == 0:
assert np.allclose(dist_mat, dist_mat.T, atol=0.00001)
# Force perfect symmetry
dist_mat = (dist_mat + dist_mat.T) / 2
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream].squeeze()
else:
# get the shortest distance for each correlation pair
dist_mat_shortest = np.minimum(dist_mat, dist_mat.T)
# Indicator says which matrix has shortest dist: value 0: mat2; 1: mat1
mat_indicator = dist_mat_shortest == dist_mat
mat_indicator = np.repeat(mat_indicator[:, :, np.newaxis],
n_shifts_per_stream, axis=2)[:, :]
# Get shift for the shortest distances
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream][:, :]
shift_mat = shift_mat * mat_indicator +\
np.transpose(shift_mat, [1, 0, 2]) * (1 - mat_indicator)
dist_mat = dist_mat_shortest
np.fill_diagonal(dist_mat, 0)
return dist_mat, shift_mat.squeeze()
|
def distance_matrix(stream_list, shift_len=0.0,
allow_individual_trace_shifts=True, cores=1):
"""
Compute distance matrix for waveforms based on cross-correlations.
Function to compute the distance matrix for all templates - will give
distance as 1-abs(cccoh), e.g. a well correlated pair of templates will
have small distances, and an equally well correlated reverse image will
have the same distance as a positively correlated image - this is an issue.
:type stream_list: list
:param stream_list:
List of the :class:`obspy.core.stream.Stream` to compute the distance
matrix for
:type shift_len: float
:param shift_len:
Seconds to shift the streams by (total value for negative and positive
direction together)
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type cores: int
:param cores: Number of cores to parallel process using, defaults to 1.
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
.. warning::
Because distance is given as :math:`1-abs(coherence)`, negatively
correlated and positively correlated objects are given the same
distance.
.. note::
Requires all traces to have the same sampling rate and same length.
"""
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(stream_list))] *
len(stream_list))
shift_mat = np.zeros_like(dist_mat)
shift_mat = np.zeros([len(stream_list),
len(stream_list),
max([len(st) for st in stream_list])])
n_shifts_per_stream = 1
for i, master in enumerate(stream_list):
dist_list, shift_list = cross_chan_correlation(
st1=master, streams=stream_list, shift_len=shift_len,
allow_individual_trace_shifts=allow_individual_trace_shifts,
xcorr_func='fftw', cores=cores)
dist_mat[i] = 1 - dist_list
if allow_individual_trace_shifts:
n_shifts_per_stream = shift_list.shape[1]
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
else:
shift_mat[i, 0:, 0:n_shifts_per_stream] = shift_list
if shift_len == 0:
assert np.allclose(dist_mat, dist_mat.T, atol=0.00001)
# Force perfect symmetry
dist_mat = (dist_mat + dist_mat.T) / 2
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream].squeeze()
else:
# get the shortest distance for each correlation pair
dist_mat_shortest = np.minimum(dist_mat, dist_mat.T)
# Indicator says which matrix has shortest dist: value 0: mat2; 1: mat1
mat_indicator = dist_mat_shortest == dist_mat
mat_indicator = np.repeat(mat_indicator[:, :, np.newaxis],
n_shifts_per_stream, axis=2)[:, :]
# Get shift for the shortest distances
shift_mat = shift_mat[:, :, 0:n_shifts_per_stream][:, :]
shift_mat = shift_mat * mat_indicator +\
np.transpose(shift_mat, [1, 0, 2]) * (1 - mat_indicator)
dist_mat = dist_mat_shortest
np.fill_diagonal(dist_mat, 0)
return dist_mat, shift_mat.squeeze()
|
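distance_matrix handles symmetry in two ways: with no allowed shift it averages the matrix with its transpose (the correlations should already match to floating point precision), while with shifts it keeps the shorter of the two directed distances for each pair. A small NumPy illustration of both strategies, on a random matrix rather than real cross-correlation output:

import numpy as np

rng = np.random.default_rng(0)
dist = rng.random((4, 4))
np.fill_diagonal(dist, 0.0)

# shift_len == 0: remove tiny floating point asymmetry by averaging.
sym_avg = (dist + dist.T) / 2

# shift_len > 0: keep the shortest distance of each (i, j) / (j, i) pair,
# as done for dist_mat_shortest above.
sym_min = np.minimum(dist, dist.T)

assert np.allclose(sym_avg, sym_avg.T)
assert np.allclose(sym_min, sym_min.T)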
1,972 |
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius',
max_iter=200, tol=1e-4,
l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,
update_H=True, verbose=0):
"""Compute Non-negative Matrix Factorization with Multiplicative Update
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Constant input matrix.
W : array-like of shape (n_samples, n_components)
Initial guess for the solution.
H : array-like of shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or string in {'frobenius', 'kullback-leibler', \
'itakura-saito'}, default='frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : int, default=200
Number of iterations.
tol : float, default=1e-4
Tolerance of the stopping condition.
l1_reg_W : float, default=0.
L1 regularization parameter for W.
l1_reg_H : float, default=0.
L1 regularization parameter for H.
l2_reg_W : float, default=0.
L2 regularization parameter for W.
l2_reg_H : float, default=0.
L2 regularization parameter for H.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : int, default=0
The verbosity level.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
start_time = time.time()
beta_loss = _beta_loss_to_float(beta_loss)
# gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
if beta_loss < 1:
gamma = 1. / (2. - beta_loss)
elif beta_loss > 2:
gamma = 1. / (beta_loss - 1.)
else:
gamma = 1.
# used for the convergence criterion
error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
previous_error = error_at_init
H_sum, HHt, XHt = None, None, None
for n_iter in range(1, max_iter + 1):
# update W
# H_sum, HHt and XHt are saved and reused if not update_H
delta_W, H_sum, HHt, XHt = _multiplicative_update_w(
X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum, HHt, XHt, update_H)
W *= delta_W
# necessary for stability with beta_loss < 1
if beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.
# update H
if update_H:
delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,
l2_reg_H, gamma)
H *= delta_H
# These values will be recomputed since H changed
H_sum, HHt, XHt = None, None, None
# necessary for stability with beta_loss < 1
if beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = _beta_divergence(X, W, H, beta_loss, square_root=True)
if verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter
|
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius',
max_iter=200, tol=1e-4,
l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,
update_H=True, verbose=0):
"""Compute Non-negative Matrix Factorization with Multiplicative Update
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Constant input matrix.
W : array-like of shape (n_samples, n_components)
Initial guess for the solution.
H : array-like of shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or {'frobenius', 'kullback-leibler', \
'itakura-saito'}, default='frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : int, default=200
Number of iterations.
tol : float, default=1e-4
Tolerance of the stopping condition.
l1_reg_W : float, default=0.
L1 regularization parameter for W.
l1_reg_H : float, default=0.
L1 regularization parameter for H.
l2_reg_W : float, default=0.
L2 regularization parameter for W.
l2_reg_H : float, default=0.
L2 regularization parameter for H.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : int, default=0
The verbosity level.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
start_time = time.time()
beta_loss = _beta_loss_to_float(beta_loss)
# gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
if beta_loss < 1:
gamma = 1. / (2. - beta_loss)
elif beta_loss > 2:
gamma = 1. / (beta_loss - 1.)
else:
gamma = 1.
# used for the convergence criterion
error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
previous_error = error_at_init
H_sum, HHt, XHt = None, None, None
for n_iter in range(1, max_iter + 1):
# update W
# H_sum, HHt and XHt are saved and reused if not update_H
delta_W, H_sum, HHt, XHt = _multiplicative_update_w(
X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum, HHt, XHt, update_H)
W *= delta_W
# necessary for stability with beta_loss < 1
if beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.
# update H
if update_H:
delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,
l2_reg_H, gamma)
H *= delta_H
# These values will be recomputed since H changed
H_sum, HHt, XHt = None, None, None
# necessary for stability with beta_loss < 1
if beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = _beta_divergence(X, W, H, beta_loss, square_root=True)
if verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter
|
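For the Frobenius loss (beta_loss = 2, hence gamma = 1) the updates computed by _multiplicative_update_w/_multiplicative_update_h reduce to the classic Lee-Seung rules. The following is a minimal NumPy sketch of those rules without regularization; it illustrates the update form only and is not scikit-learn's implementation.

import numpy as np

rng = np.random.default_rng(0)
X = rng.random((6, 5))
W = rng.random((6, 3))
H = rng.random((3, 5))
eps = np.finfo(np.float64).eps  # guards against division by zero

for _ in range(100):
    # W <- W * (X H^T) / (W H H^T)
    W *= (X @ H.T) / (W @ H @ H.T + eps)
    # H <- H * (W^T X) / (W^T W H)
    H *= (W.T @ X) / (W.T @ W @ H + eps)

print(np.linalg.norm(X - W @ H))  # reconstruction error shrinks over the iterations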
31,715 |
def lower_keys(response: dict):
"""
    Lowers the first letter of all keys in the given dictionary and returns the new dictionary.
"""
demisto.debug(f'lowering keys for response: {response}')
return dict((decapitalize(key), value) for (key, value) in response.items())
|
def keys_to_lowercase(response: dict):
"""
    Lowers the first letter of all keys in the given dictionary and returns the new dictionary.
"""
demisto.debug(f'lowering keys for response: {response}')
return dict((decapitalize(key), value) for (key, value) in response.items())
|
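Both versions of the key-lowering helper rely on a decapitalize function from the surrounding integration. A self-contained sketch of the intended behaviour, with decapitalize written out locally (assumed to lowercase only the first character of a key):

def decapitalize(text):
    # Lowercase only the first character, leave the rest untouched.
    return text[:1].lower() + text[1:] if text else text

def keys_to_lowercase(response):
    return {decapitalize(key): value for key, value in response.items()}

print(keys_to_lowercase({'Id': 1, 'DisplayName': 'Jane', 'primaryEmail': 'j@x.io'}))
# -> {'id': 1, 'displayName': 'Jane', 'primaryEmail': 'j@x.io'}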
56,046 |
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
# set up weights and biases if available
if is_wandb_available() and args.wandb:
import wandb
wandb.init(project=args.output_dir.split("/")[-1])
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Load dataset
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# TODO support datasets from local folders
dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir)
# Rename column names to standardized names (only "image" and "label" need to be present)
if "pixel_values" in dataset["train"].column_names:
dataset = dataset.rename_columns({"pixel_values": "image"})
if "annotation" in dataset["train"].column_names:
dataset = dataset.rename_columns({"annotation": "label"})
# If we don't have a validation split, split off a percentage of train as validation.
args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split
if isinstance(args.train_val_split, float) and args.train_val_split > 0.0:
split = dataset["train"].train_test_split(args.train_val_split)
dataset["train"] = split["train"]
dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
if args.dataset_name == "scene_parse_150":
repo_id = "datasets/huggingface/label-files"
filename = "ade20k-id2label.json"
num_labels = 150
else:
repo_id = f"datasets/{args.dataset_name}"
filename = "id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)
# Load pretrained model and feature extractor
config = AutoConfig.from_pretrained(
args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id
)
feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path)
model = AutoModelForSemanticSegmentation.from_pretrained(
args.model_name_or_path,
config=config,
)
# Preprocessing the datasets
# Define torchvision transforms to be applied to each image + target.
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9
# Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py
_train_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
RandomCrop(size=feature_extractor.size),
RandomHorizontalFlip(flip_prob=0.5),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
# Define torchvision transform to be applied to each image.
# jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
_val_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
Resize(size=(feature_extractor.size, feature_extractor.size)),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
def train_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _train_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
def val_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _val_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
with accelerator.main_process_first():
train_dataset = dataset["train"].with_transform(train_transforms)
eval_dataset = dataset["validation"].with_transform(val_transforms)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
optimizer = torch.optim.AdamW(
list(model.parameters()),
lr=args.learning_rate,
betas=[args.adam_beta1, args.adam_beta2],
eps=args.adam_epsilon,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Instantiate metric
metric = load_metric("mean_iou")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
# Log all results
if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0:
loss.detach()
if accelerator.state.num_processes > 1:
loss = accelerator.gather(loss).sum() / accelerator.num_processes
train_logs = {
"loss": loss,
"lr": torch.tensor(optimizer.param_groups[0]["lr"]),
}
# Evaluate (gather required)
with torch.no_grad():
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
train_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
train_logs["mean_iou"] = train_metrics["mean_iou"]
train_logs["mean_accuracy"] = train_metrics["mean_accuracy"]
train_logs["overall_accuracy"] = train_metrics["overall_accuracy"]
log_str = ""
for k, v in train_logs.items():
if isinstance(v, torch.Tensor):
log_str += "| {}: {:.3e}".format(k, v.item())
else:
log_str += "| {}: {:.3e}".format(k, v)
if accelerator.is_local_main_process:
progress_bar.write(log_str)
if is_wandb_available() and args.wandb:
wandb.log(train_logs)
# Save model every `args.saving_steps` steps
if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0:
if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process:
repo.push_to_hub(
commit_message=f"Training in progress step {completed_steps}",
blocking=False,
auto_lfs_prune=True,
)
logger.info("***** Running evaluation *****")
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
outputs = model(**batch)
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
logger.info(f"epoch {epoch}: {eval_metrics}")
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
# set up weights and biases if available
if is_wandb_available() and args.wandb:
import wandb
wandb.init(project=args.output_dir.split("/")[-1])
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Load dataset
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# TODO support datasets from local folders
dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir)
# Rename column names to standardized names (only "image" and "label" need to be present)
if "pixel_values" in dataset["train"].column_names:
dataset = dataset.rename_columns({"pixel_values": "image"})
if "annotation" in dataset["train"].column_names:
dataset = dataset.rename_columns({"annotation": "label"})
# If we don't have a validation split, split off a percentage of train as validation.
args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split
if isinstance(args.train_val_split, float) and args.train_val_split > 0.0:
split = dataset["train"].train_test_split(args.train_val_split)
dataset["train"] = split["train"]
dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
if args.dataset_name == "scene_parse_150":
repo_id = "datasets/huggingface/label-files"
filename = "ade20k-id2label.json"
num_labels = 150
else:
repo_id = f"datasets/{args.dataset_name}"
filename = "id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)
# Load pretrained model and feature extractor
config = AutoConfig.from_pretrained(
args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id
)
feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path)
model = AutoModelForSemanticSegmentation.from_pretrained(
args.model_name_or_path,
config=config,
)
# Preprocessing the datasets
# Define torchvision transforms to be applied to each image + target.
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9
# Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py
_train_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
RandomCrop(size=feature_extractor.size),
RandomHorizontalFlip(flip_prob=0.5),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
# Define torchvision transform to be applied to each image.
# jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
_val_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
Resize(size=(feature_extractor.size, feature_extractor.size)),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
def train_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _train_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
def val_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _val_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
with accelerator.main_process_first():
train_dataset = dataset["train"].with_transform(train_transforms)
eval_dataset = dataset["validation"].with_transform(val_transforms)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
optimizer = torch.optim.AdamW(
list(model.parameters()),
lr=args.learning_rate,
betas=[args.adam_beta1, args.adam_beta2],
eps=args.adam_epsilon,
)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Instantiate metric
metric = load_metric("mean_iou")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
# Log all results
if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0:
loss.detach()
if accelerator.state.num_processes > 1:
loss = accelerator.gather(loss).sum() / accelerator.num_processes
train_logs = {
"loss": loss,
"lr": torch.tensor(optimizer.param_groups[0]["lr"]),
}
# Evaluate (gather required)
with torch.no_grad():
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
train_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
train_logs["mean_iou"] = train_metrics["mean_iou"]
train_logs["mean_accuracy"] = train_metrics["mean_accuracy"]
train_logs["overall_accuracy"] = train_metrics["overall_accuracy"]
log_str = ""
for k, v in train_logs.items():
if isinstance(v, torch.Tensor):
log_str += "| {}: {:.3e}".format(k, v.item())
else:
log_str += "| {}: {:.3e}".format(k, v)
if accelerator.is_local_main_process:
progress_bar.write(log_str)
if is_wandb_available() and args.wandb:
wandb.log(train_logs)
# Save model every `args.saving_steps` steps
if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0:
if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process:
repo.push_to_hub(
commit_message=f"Training in progress step {completed_steps}",
blocking=False,
auto_lfs_prune=True,
)
logger.info("***** Running evaluation *****")
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
outputs = model(**batch)
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
logger.info(f"epoch {epoch}: {eval_metrics}")
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
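Apart from preparing the lr_scheduler through accelerator.prepare, both training loops scale the loss by the accumulation factor and only step the optimizer every gradient_accumulation_steps batches (or on the final batch of the epoch). A framework-free sketch of that bookkeeping, mirroring the "step % accumulation == 0 or last batch" condition used above:

def accumulation_schedule(num_batches, accumulation_steps):
    # Return the batch indices on which the optimizer would step.
    optimizer_steps = []
    for step in range(num_batches):
        if step % accumulation_steps == 0 or step == num_batches - 1:
            optimizer_steps.append(step)
    return optimizer_steps

# With 10 batches and an accumulation factor of 4 the optimizer steps on 0, 4, 8 and 9.
print(accumulation_schedule(10, 4))  # [0, 4, 8, 9]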
56,270 |
def reverse_crop_pad_resize(
xyxys: np.ndarray,
pad_img_size: tuple,
dsize: int,
pad_left: int,
pad_top: int,
roi_xyxy: np.ndarray):
'''
reversal of crop_pad_resize
xyxys: bboxes in xyxy format
pad_img_size, pad_left, pad_top are from outputs of crop_pad_resize()
dsize is the resize params used in crop_pad_resize()
roi_xyxy is the ROI used to crop original image
'''
## resize & un-pad bboxes back to padded image
hp, wp = pad_img_size
scalex, scaley = dsize / wp, dsize / hp
xyxys[:, 0: : 2] = np.clip(xyxys[:, 0: : 2] / scalex - pad_left, 0, wp)
xyxys[:, 1: : 2] = np.clip(xyxys[:, 1: : 2] / scaley - pad_top, 0, hp)
## un-crop
offsetx, offsety = roi_xyxy[: 2]
xyxys[:, 0: : 2] += offsetx
xyxys[:, 1: : 2] += offsety
return xyxys
|
def reverse_crop_pad_resize(
xyxys: np.ndarray,
pad_img_size: tuple,
dsize: int,
pad_left: int,
pad_top: int,
roi_xyxy: np.ndarray):
'''
reversal of crop_pad_resize
xyxys: bboxes in xyxy format
pad_img_size, pad_left, pad_top are from outputs of crop_pad_resize()
dsize is the resize params used in crop_pad_resize()
roi_xyxy is the ROI used to crop original image
'''
## resize & un-pad bboxes back to padded image
hp, wp = pad_img_size
scalex, scaley = dsize / wp, dsize / hp
xyxys[:, 0: : 2] = np.clip(xyxys[:, 0: : 2] / scalex - pad_left, 0, wp)
xyxys[:, 1: : 2] = np.clip(xyxys[:, 1: : 2] / scaley - pad_top, 0, hp)
## un-crop
offsetx, offsety = roi_xyxy[: 2]
xyxys[:, 0: : 2] += offsetx
xyxys[:, 1: : 2] += offsety
return xyxys
|
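A hypothetical worked example for the bbox un-mapping above: one box detected in a 400x400 resized crop is mapped back to original-image coordinates. All numbers (ROI, padding, sizes) are made up for illustration, and reverse_crop_pad_resize is assumed to be in scope as defined above.

import numpy as np

boxes = np.array([[80.0, 120.0, 240.0, 360.0]])   # xyxy in the 400x400 crop
restored = reverse_crop_pad_resize(
    boxes,
    pad_img_size=(200, 100),                 # padded crop was 200 (h) x 100 (w)
    dsize=400,                               # crop was resized to 400x400
    pad_left=10, pad_top=20,                 # padding added before the resize
    roi_xyxy=np.array([50, 60, 150, 260]),   # ROI used to crop the original image
)
print(restored)  # -> [[ 60. 100. 100. 220.]]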
32,764 |
def _server_constructor_interceptor(wrapped, instance, args, kwargs):
# DEV: we clone the pin on the grpc module and configure it for the server
# interceptor
pin = Pin.get_from(grpc)
if not pin:
return wrapped(*args, **kwargs)
interceptor = create_server_interceptor(pin)
if 'interceptors' in kwargs:
kwargs['interceptors'] = [interceptor] + kwargs['interceptors']
else:
kwargs['interceptors'] = [interceptor]
server = wrapped(*args, **kwargs)
return server
|
def _client_channel_interceptor(wrapped, instance, args, kwargs):
channel = wrapped(*args, **kwargs)
(host, port) = _parse_target_from_arguments(args, kwargs)
# DEV: we clone the pin on the grpc module and configure it for the client
# interceptor
pin = Pin.get_from(grpc)
if not pin:
return channel
tags = {
'grpc.host': host,
'grpc.port': port,
}
if pin and pin.tags:
tags.update(pin.tags)
pin = pin.clone(tags=tags)
channel = grpc.intercept_channel(channel, create_client_interceptor(pin))
return channel
|
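The pair above wires tracing interceptors in the two standard gRPC ways: server interceptors are prepended to the interceptors keyword argument of the server constructor, and client interceptors are attached with grpc.intercept_channel. A minimal, generic client-side sketch (a no-op interceptor rather than the tracing one) follows.

import grpc

class NoopClientInterceptor(grpc.UnaryUnaryClientInterceptor):
    def intercept_unary_unary(self, continuation, client_call_details, request):
        # A real interceptor would start a span and add tags here.
        return continuation(client_call_details, request)

channel = grpc.insecure_channel('localhost:50051')
channel = grpc.intercept_channel(channel, NoopClientInterceptor())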
30,255 |
def download_count_command():
LOG("GitHub: How many downloads were in repo: %s/%s." % (OWNER, REPO))
res = http_request("GET", "/releases")
if (res == []):
demisto.results("There were no dowloads for the repository %s/%s" % (OWNER, REPO))
else:
counter = 0
for release in res:
for asset in release.get('assets'):
counter = counter + asset.get("download_count")
demisto.results("There were %d dowloads for the repository %s/%s" % (counter, OWNER, REPO))
|
def download_count_command():
LOG("GitHub: How many downloads were in repo: %s/%s." % (OWNER, REPO))
res = http_request("GET", "/releases")
if (res == []):
demisto.results("There were no downloads for the repository %s/%s" % (OWNER, REPO))
else:
counter = 0
for release in res:
for asset in release.get('assets'):
counter = counter + asset.get("download_count")
demisto.results("There were %d dowloads for the repository %s/%s" % (counter, OWNER, REPO))
|
1,578 |
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
# Validation of the arguments
if n_classes == 0 and not allow_unlabeled:
raise ValueError(
"Invalid set of arguments passed: " +
"n_classes = 0 and allow_unlabeled = False"
)
if length == 0:
raise ValueError("Invalid argument passed: length = 0")
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
|
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
# Validation of the arguments
if n_classes == 0 and not allow_unlabeled:
raise ValueError(
"Invalid set of arguments passed: " +
"n_classes = 0 and allow_unlabeled = False"
)
if length < 1:
raise ValueError("Invalid argument passed: length = 0")
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
|
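A minimal usage sketch of the generator shown in this row, assuming the public scikit-learn entry point sklearn.datasets.make_multilabel_classification; the shapes and values printed are only illustrative.

from sklearn.datasets import make_multilabel_classification

# Draw a small multilabel problem and also return the sampled distributions.
X, Y, p_c, p_w_c = make_multilabel_classification(
    n_samples=10, n_features=20, n_classes=5, n_labels=2,
    return_distributions=True, random_state=0)
print(X.shape)              # (10, 20): word counts per document
print(Y.shape)              # (10, 5): dense binary label indicator
print(round(p_c.sum(), 6))  # 1.0: the class priors are normalized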
17,875 |
def _mantel_stats_pearson_flat(x, y_flat, permutations):
"""Compute original and permuted stats using pearsonr.
Parameters
----------
x : DistanceMatrix
Input distance matrix.
y_flat: 1D array
Compact representation of a distance matrix.
permutations : int
Number of times to randomly permute `x` when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and
permuted_stats will be an empty array.
Returns
-------
orig_stat : 1D array_like
Correlation coefficient of the test.
permuted_stats : 1D array_like
Permutted correlation coefficients of the test.
"""
x_flat = x.condensed_form()
# If an input is constant, the correlation coefficient is not defined.
if (x_flat == x_flat[0]).all() or (y_flat == y_flat[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, []
# inline pearsonr, condensed from scipy.stats.pearsonr
xmean = x_flat.mean()
xm = x_flat - xmean
normxm = np.linalg.norm(xm)
xm_normalized = xm/normxm
del xm
del x_flat
ymean = y_flat.mean()
ym = y_flat - ymean
normym = np.linalg.norm(ym)
ym_normalized = ym/normym
del ym
threshold = 1e-13
if (((normxm < threshold*abs(xmean)) or
(normym < threshold*abs(ymean)))):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
orig_stat = np.dot(xm_normalized, ym_normalized)
# Presumably, if abs(orig_stat) > 1, then it is only some small artifact of
# floating point arithmetic.
orig_stat = max(min(orig_stat, 1.0), -1.0)
mat_n = x._data.shape[0]
# note: xmean and normxm do not change with permutations
permuted_stats = []
if not (permutations == 0 or np.isnan(orig_stat)):
# inline DistanceMatrix.permute, grouping them together
x_data = x._data
if not x_data.flags.c_contiguous:
x_data = np.asarray(x_data, order='C')
# compute all pearsonr permutations at once
# create first the list of permutations
perm_order = np.empty([permutations, mat_n], dtype=np.int)
for row in range(permutations):
perm_order[row, :] = np.random.permutation(mat_n)
permuted_stats = np.empty([permutations], dtype=x_data.dtype)
mantel_perm_pearsonr_cy(x_data, perm_order, xmean, normxm,
ym_normalized, permuted_stats)
return orig_stat, permuted_stats
|
def _mantel_stats_pearson_flat(x, y_flat, permutations):
"""Compute original and permuted stats using pearsonr.
Parameters
----------
x : DistanceMatrix
Input distance matrix.
y_flat: 1D array
Compact representation of a distance matrix.
permutations : int
Number of times to randomly permute `x` when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and
permuted_stats will be an empty array.
Returns
-------
orig_stat : 1D array_like
Correlation coefficient of the test.
permuted_stats : 1D array_like
Permuted correlation coefficients of the test.
"""
x_flat = x.condensed_form()
# If an input is constant, the correlation coefficient is not defined.
if (x_flat == x_flat[0]).all() or (y_flat == y_flat[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, []
# inline pearsonr, condensed from scipy.stats.pearsonr
xmean = x_flat.mean()
xm = x_flat - xmean
normxm = np.linalg.norm(xm)
xm_normalized = xm/normxm
del xm
del x_flat
ymean = y_flat.mean()
ym = y_flat - ymean
normym = np.linalg.norm(ym)
ym_normalized = ym/normym
del ym
threshold = 1e-13
if (((normxm < threshold*abs(xmean)) or
(normym < threshold*abs(ymean)))):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
orig_stat = np.dot(xm_normalized, ym_normalized)
# Presumably, if abs(orig_stat) > 1, then it is only some small artifact of
# floating point arithmetic.
orig_stat = max(min(orig_stat, 1.0), -1.0)
mat_n = x._data.shape[0]
# note: xmean and normxm do not change with permutations
permuted_stats = []
if not (permutations == 0 or np.isnan(orig_stat)):
# inline DistanceMatrix.permute, grouping them together
x_data = x._data
if not x_data.flags.c_contiguous:
x_data = np.asarray(x_data, order='C')
# compute all pearsonr permutations at once
# create first the list of permutations
perm_order = np.empty([permutations, mat_n], dtype=np.int)
for row in range(permutations):
perm_order[row, :] = np.random.permutation(mat_n)
permuted_stats = np.empty([permutations], dtype=x_data.dtype)
mantel_perm_pearsonr_cy(x_data, perm_order, xmean, normxm,
ym_normalized, permuted_stats)
return orig_stat, permuted_stats
|
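The helper in this row inlines Pearson's r as a dot product of mean-centered, unit-norm vectors. A short, self-contained check of that identity against scipy.stats.pearsonr; the arrays below are arbitrary stand-ins for condensed distance matrices, not data from the row.

import numpy as np
from scipy.stats import pearsonr

rng = np.random.RandomState(0)
x_flat = rng.rand(15)
y_flat = rng.rand(15)

# Same algebra as the inlined computation above.
xm = x_flat - x_flat.mean()
ym = y_flat - y_flat.mean()
r = np.dot(xm / np.linalg.norm(xm), ym / np.linalg.norm(ym))
r = max(min(r, 1.0), -1.0)  # clamp tiny floating point overshoot
print(np.isclose(r, pearsonr(x_flat, y_flat)[0]))  # True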
58,837 |
def _exec_fft(a, direction, value_type, norm, axis, overwrite_x,
out_size=None, out=None, plan=None):
fft_type = _convert_fft_type(a.dtype, value_type)
if axis % a.ndim != a.ndim - 1:
a = a.swapaxes(axis, -1)
if a.base is not None or not a.flags.c_contiguous:
a = a.copy()
elif (value_type == 'C2R' and not overwrite_x and
10010 <= cupy.cuda.runtime.runtimeGetVersion()):
# The input array may be modified in CUDA 10.1 and above.
# See #3763 for the discussion.
a = a.copy()
elif cupy.cuda.runtime.is_hip and value_type != 'C2C':
# hipFFT's R2C would overwrite input
# hipFFT's C2R needs a workaround (see below)
a = a.copy()
n = a.shape[-1]
if n < 1:
raise ValueError(
'Invalid number of FFT data points (%d) specified.' % n)
# Workaround for hipFFT/rocFFT:
# Both cuFFT and hipFFT/rocFFT have this requirement that 0-th and
# N/2-th element must be real, but cuFFT internally simply ignores it
# while hipFFT handles it badly in both Plan1d and PlanNd, so we must
# do the correction ourselves to ensure the condition is met.
if cupy.cuda.runtime.is_hip and value_type == 'C2R':
a[..., 0].imag = 0
if out_size is None:
a[..., -1].imag = 0
elif out_size % 2 == 0:
a[..., out_size // 2].imag = 0
if out_size is None:
out_size = n
batch = a.size // n
# plan search precedence:
# 1. plan passed in as an argument
# 2. plan as context manager
# 3. cached plan
# 4. create a new one
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
if plan is None:
plan = curr_plan
else:
raise RuntimeError('Use the cuFFT plan either as a context manager'
' or as an argument.')
if plan is None:
devices = None if not config.use_multi_gpus else config._devices
# TODO(leofang): do we need to add the current stream to keys?
keys = (out_size, fft_type, batch, devices)
mgr = config.get_current_callback_manager()
if mgr is not None:
# to avoid a weird segfault, we generate and cache distinct plans
# for every possible (load_aux, store_aux) pairs; the plans are
# still generated from the same external Python module
load_aux = mgr.cb_load_aux_arr
store_aux = mgr.cb_store_aux_arr
keys += (mgr.cb_load, mgr.cb_store,
0 if load_aux is None else load_aux.data.ptr,
0 if store_aux is None else store_aux.data.ptr)
cache = get_plan_cache()
cached_plan = cache.get(keys)
if cached_plan is not None:
plan = cached_plan
elif mgr is None:
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
cache[keys] = plan
else: # has callback
# TODO(leofang): support multi-GPU callback (devices is ignored)
if devices:
raise NotImplementedError('multi-GPU cuFFT callbacks are not '
'yet supported')
plan = mgr.create_plan(('Plan1d', keys[:-5]))
mgr.set_callbacks(plan)
cache[keys] = plan
else:
# check plan validity
if not isinstance(plan, cufft.Plan1d):
raise ValueError('expected plan to have type cufft.Plan1d')
if fft_type != plan.fft_type:
raise ValueError('cuFFT plan dtype mismatch.')
if out_size != plan.nx:
raise ValueError('Target array size does not match the plan.',
out_size, plan.nx)
if batch != plan.batch:
raise ValueError('Batch size does not match the plan.')
if config.use_multi_gpus != (plan.gpus is not None):
raise ValueError('Unclear if multiple GPUs are to be used or not.')
if overwrite_x and value_type == 'C2C':
out = a
elif out is not None:
# verify that out has the expected shape and dtype
plan.check_output_array(a, out)
else:
out = plan.get_output_array(a)
if batch != 0:
plan.fft(a, out, direction)
sz = out.shape[-1]
if fft_type == cufft.CUFFT_R2C or fft_type == cufft.CUFFT_D2Z:
sz = n
if norm == 'backward':
if direction == cufft.CUFFT_INVERSE:
out /= sz
elif norm == 'ortho':
out /= math.sqrt(sz)
elif norm == 'forward':
if direction == cufft.CUFFT_FORWARD:
out /= sz
if axis % a.ndim != a.ndim - 1:
out = out.swapaxes(axis, -1)
return out
|
def _exec_fft(a, direction, value_type, norm, axis, overwrite_x,
out_size=None, out=None, plan=None):
fft_type = _convert_fft_type(a.dtype, value_type)
if axis % a.ndim != a.ndim - 1:
a = a.swapaxes(axis, -1)
if a.base is not None or not a.flags.c_contiguous:
a = a.copy()
elif (value_type == 'C2R' and not overwrite_x and
10010 <= cupy.cuda.runtime.runtimeGetVersion()):
# The input array may be modified in CUDA 10.1 and above.
# See #3763 for the discussion.
a = a.copy()
elif cupy.cuda.runtime.is_hip and value_type != 'C2C':
# hipFFT's R2C would overwrite input
# hipFFT's C2R needs a workaround (see below)
a = a.copy()
n = a.shape[-1]
if n < 1:
raise ValueError(
'Invalid number of FFT data points (%d) specified.' % n)
# Workaround for hipFFT/rocFFT:
# Both cuFFT and hipFFT/rocFFT have this requirement that 0-th and
# N/2-th element must be real, but cuFFT internally simply ignores it
# while hipFFT handles it badly in both Plan1d and PlanNd, so we must
# do the correction ourselves to ensure the condition is met.
if cupy.cuda.runtime.is_hip and value_type == 'C2R':
a[..., 0].imag = 0
if out_size is None:
a[..., -1].imag = 0
elif out_size % 2 == 0:
a[..., out_size // 2].imag = 0
if out_size is None:
out_size = n
batch = a.size // n
# plan search precedence:
# 1. plan passed in as an argument
# 2. plan as context manager
# 3. cached plan
# 4. create a new one
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
if plan is None:
plan = curr_plan
else:
raise RuntimeError('Use the cuFFT plan either as a context manager'
' or as an argument.')
if plan is None:
devices = None if not config.use_multi_gpus else config._devices
# TODO(leofang): do we need to add the current stream to keys?
keys = (out_size, fft_type, batch, devices)
mgr = config.get_current_callback_manager()
if mgr is not None:
# to avoid a weird segfault, we generate and cache distinct plans
# for every possible (load_aux, store_aux) pairs; the plans are
# still generated from the same external Python module
load_aux = mgr.cb_load_aux_arr
store_aux = mgr.cb_store_aux_arr
keys += (mgr.cb_load, mgr.cb_store,
0 if load_aux is None else load_aux.data.ptr,
0 if store_aux is None else store_aux.data.ptr)
cache = get_plan_cache()
cached_plan = cache.get(keys)
if cached_plan is not None:
plan = cached_plan
elif mgr is None:
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
cache[keys] = plan
else: # has callback
# TODO(leofang): support multi-GPU callback (devices is ignored)
if devices:
raise NotImplementedError('multi-GPU cuFFT callbacks are not '
'yet supported')
plan = mgr.create_plan(('Plan1d', keys[:-5]))
mgr.set_callbacks(plan)
cache[keys] = plan
else:
# check plan validity
if not isinstance(plan, cufft.Plan1d):
raise ValueError('expected plan to have type cufft.Plan1d')
if fft_type != plan.fft_type:
raise ValueError('cuFFT plan dtype mismatch.')
if out_size != plan.nx:
raise ValueError('Target array size does not match the plan.',
out_size, plan.nx)
if batch != plan.batch:
raise ValueError('Batch size does not match the plan.')
if config.use_multi_gpus != (plan.gpus is not None):
raise ValueError('Unclear if multiple GPUs are to be used or not.')
if overwrite_x and value_type == 'C2C':
out = a
elif out is not None:
# verify that out has the expected shape and dtype
plan.check_output_array(a, out)
else:
out = plan.get_output_array(a)
if batch != 0:
plan.fft(a, out, direction)
sz = out.shape[-1]
if fft_type == cufft.CUFFT_R2C or fft_type == cufft.CUFFT_D2Z:
sz = n
if norm == 'backward' and direction == cufft.CUFFT_INVERSE:
out /= sz
elif norm == 'ortho':
out /= math.sqrt(sz)
elif norm == 'forward':
if direction == cufft.CUFFT_FORWARD:
out /= sz
if axis % a.ndim != a.ndim - 1:
out = out.swapaxes(axis, -1)
return out
|
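The tail of _exec_fft applies the three FFT normalization modes ('backward', 'ortho', 'forward'). The same convention exists in numpy.fft (NumPy 1.20 or later), which allows a quick CPU-side illustration without a GPU; this sketches only the scaling, not the plan handling above.

import numpy as np

a = np.arange(8, dtype=float)
n = a.shape[0]

backward = np.fft.fft(a, norm='backward')  # unscaled forward transform
forward = np.fft.fft(a, norm='forward')    # forward transform divided by n
ortho = np.fft.fft(a, norm='ortho')        # divided by sqrt(n) in each direction

print(np.allclose(forward, backward / n))           # True
print(np.allclose(ortho, backward / np.sqrt(n)))    # True
print(np.allclose(np.fft.ifft(backward, norm='backward'), a))  # inverse carries the 1/n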
27,476 |
def search_all_iam_policies(scope, query=None, page_size=None):
# [START asset_quickstart_search_all_iam_policies]
from google.cloud import asset_v1
client = asset_v1.AssetServiceClient()
response = client.search_all_iam_policies(scope, query, page_size)
for page in response.pages:
for policy in page:
print(policy)
break
# [END asset_quickstart_search_all_iam_policies]
|
def search_all_iam_policies(scope, query=None, page_size=None):
# [START asset_quickstart_search_all_iam_policies]
from google.cloud import asset_v1
client = asset_v1.AssetServiceClient()
response = client.search_all_iam_policies(scope, query=query, page_size=page_size)
for page in response.pages:
for policy in page:
print(policy)
break
# [END asset_quickstart_search_all_iam_policies]
|
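A hypothetical invocation of the sample above. The scope and query strings are placeholders, and a real run needs the google-cloud-asset package plus application default credentials; none of these values come from the dataset row itself.

search_all_iam_policies(
    scope='projects/my-project-id',   # placeholder project
    query='policy:roles/owner',       # placeholder query for policies granting Owner
    page_size=10)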
8,725 |
def add_common_arguments(parser):
"""Add common and configuration-related arguments to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
This functions adds the common arguments for Sopel's command line tools.
It adds the following arguments:
* ``-c/--config``: the name of the Sopel config, or its absolute path
* ``--config-dir``: the directory to look for Sopel config
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
[sopel-command] --config-dir [directory] -c [name]
Then, when the parser parses the command line arguments, it will expose
``config`` and ``configdir`` options that can be used to find and load
Sopel's settings.
.. seealso::
The :func:`sopel.cli.utils.load_settings` function uses an ``options``
object from a parser configured with such arguments.
"""
parser.add_argument(
'-c', '--config',
default='default',
metavar='filename',
dest='config',
help='Use a specific configuration file.')
parser.add_argument(
'--config-dir',
default=config.DEFAULT_HOMEDIR,
dest='configdir',
help='Look for configuration from this Sopel config directory.')
|
def add_common_arguments(parser):
"""Add common and configuration-related arguments to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
This functions adds the common arguments for Sopel's command line tools.
It adds the following arguments:
* ``-c``/``--config``: the name of the Sopel config, or its absolute path
* ``--config-dir``: the directory to look for Sopel config
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
[sopel-command] --config-dir [directory] -c [name]
Then, when the parser parses the command line arguments, it will expose
``config`` and ``configdir`` options that can be used to find and load
Sopel's settings.
.. seealso::
The :func:`sopel.cli.utils.load_settings` function uses an ``options``
object from a parser configured with such arguments.
"""
parser.add_argument(
'-c', '--config',
default='default',
metavar='filename',
dest='config',
help='Use a specific configuration file.')
parser.add_argument(
'--config-dir',
default=config.DEFAULT_HOMEDIR,
dest='configdir',
help='Look for configuration from this Sopel config directory.')
|
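A minimal sketch of how a command-line tool would use this helper, assuming it runs in a module where Sopel's config is importable (as in the file the function comes from); the filename passed on the command line is made up.

import argparse

parser = argparse.ArgumentParser(description='example Sopel tool')
add_common_arguments(parser)

options = parser.parse_args(['-c', 'mybot.cfg'])
print(options.config)     # 'mybot.cfg'
print(options.configdir)  # defaults to config.DEFAULT_HOMEDIR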
4,250 |
def test_pick_types_reject_flat_keys():
"""Test that epochs.pick_types removes keys from reject/flat"""
raw, events, _ = _get_data()
event_id = {'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4}
picks = pick_types(raw.info, meg=True, eeg=True, ecg=True, eog=True)
epochs = Epochs(raw, events, event_id, preload=True, picks=picks,
reject=dict(grad=1e-10, mag=1e-10, eeg=1e-3, eog=1e-3),
flat=dict(grad=1e-16, mag=1e-16, eeg=1e-16, eog=1e-16))
assert sorted(epochs.reject.keys()) == ['eeg', 'eog', 'grad', 'mag']
assert sorted(epochs.flat.keys()) == ['eeg', 'eog', 'grad', 'mag']
epochs.pick_types(meg=True, eeg=False, ecg=False, eog=False)
assert sorted(epochs.reject.keys()) == ['grad', 'mag']
assert sorted(epochs.flat.keys()) == ['grad', 'mag']
|
def test_pick_types_reject_flat_keys():
"""Test that epochs.pick_types removes keys from reject/flat."""
raw, events, _ = _get_data()
event_id = {'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4}
picks = pick_types(raw.info, meg=True, eeg=True, ecg=True, eog=True)
epochs = Epochs(raw, events, event_id, preload=True, picks=picks,
reject=dict(grad=1e-10, mag=1e-10, eeg=1e-3, eog=1e-3),
flat=dict(grad=1e-16, mag=1e-16, eeg=1e-16, eog=1e-16))
assert sorted(epochs.reject.keys()) == ['eeg', 'eog', 'grad', 'mag']
assert sorted(epochs.flat.keys()) == ['eeg', 'eog', 'grad', 'mag']
epochs.pick_types(meg=True, eeg=False, ecg=False, eog=False)
assert sorted(epochs.reject.keys()) == ['grad', 'mag']
assert sorted(epochs.flat.keys()) == ['grad', 'mag']
|
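What the test in this row checks amounts to filtering the reject/flat dictionaries down to the channel types that survive the pick. A plain-Python sketch of that behavior follows; it is not MNE-Python's implementation, just the dictionary filtering it boils down to.

reject = dict(grad=1e-10, mag=1e-10, eeg=1e-3, eog=1e-3)
flat = dict(grad=1e-16, mag=1e-16, eeg=1e-16, eog=1e-16)

kept_types = {'grad', 'mag'}  # channel types left after pick_types(meg=True, eeg=False, ...)
reject = {k: v for k, v in reject.items() if k in kept_types}
flat = {k: v for k, v in flat.items() if k in kept_types}

print(sorted(reject))  # ['grad', 'mag']
print(sorted(flat))    # ['grad', 'mag']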
40,572 |
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
if solver_name is None:
solver_name = snakemake.config['solving']['solver']['name']
L = (n.loads_t.p_set.mean()
.groupby(n.loads.bus).sum()
.groupby([n.buses.country, n.buses.sub_network]).sum()
.pipe(normed))
N = n.buses.groupby(['country', 'sub_network']).size()
assert n_clusters >= len(N) and n_clusters <= N.sum(), \
"Number of clusters must be {} <= n_clusters <= {} for this selection of countries.".format(len(N), N.sum())
if focus_weights is not None:
total_focus = sum(list(focus_weights.values()))
assert total_focus <= 1.0, "The sum of focus weights must be less than 1."
for country, weight in focus_weights.items():
L[country] = weight
remainder = [c not in focus_weights.keys() for c in L.index.get_level_values('country')]
L[remainder] = L.loc[remainder].pipe(normed) * (1 - total_focus)
logger.warning('Using custom focus weights for determining number of clusters.')
assert L.sum() == 1.0, "Country weights L must sum up to 1.0 when distributing clusters."
m = po.ConcreteModel()
def n_bounds(model, *n_id):
return (1, N[n_id])
m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
m.objective = po.Objective(expr=sum((m.n[i] - L.loc[i]*n_clusters)**2 for i in L.index),
sense=po.minimize)
opt = po.SolverFactory(solver_name)
if not opt.has_capability('quadratic_objective'):
logger.warn(f'The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`.')
opt = po.SolverFactory('ipopt')
results = opt.solve(m)
assert results['Solver'][0]['Status'].key == 'ok', "Solver returned non-optimally: {}".format(results)
return pd.Series(m.n.get_values(), index=L.index).astype(int)
|
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
if solver_name is None:
solver_name = snakemake.config['solving']['solver']['name']
L = (n.loads_t.p_set.mean()
.groupby(n.loads.bus).sum()
.groupby([n.buses.country, n.buses.sub_network]).sum()
.pipe(normed))
N = n.buses.groupby(['country', 'sub_network']).size()
assert n_clusters >= len(N) and n_clusters <= N.sum(), \
"Number of clusters must be {} <= n_clusters <= {} for this selection of countries.".format(len(N), N.sum())
if focus_weights is not None:
total_focus = sum(list(focus_weights.values()))
assert total_focus <= 1.0, "The sum of focus weights must be less than 1."
for country, weight in focus_weights.items():
L[country] = weight / len(L[country])
remainder = [c not in focus_weights.keys() for c in L.index.get_level_values('country')]
L[remainder] = L.loc[remainder].pipe(normed) * (1 - total_focus)
logger.warning('Using custom focus weights for determining number of clusters.')
assert L.sum() == 1.0, "Country weights L must sum up to 1.0 when distributing clusters."
m = po.ConcreteModel()
def n_bounds(model, *n_id):
return (1, N[n_id])
m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
m.objective = po.Objective(expr=sum((m.n[i] - L.loc[i]*n_clusters)**2 for i in L.index),
sense=po.minimize)
opt = po.SolverFactory(solver_name)
if not opt.has_capability('quadratic_objective'):
logger.warn(f'The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`.')
opt = po.SolverFactory('ipopt')
results = opt.solve(m)
assert results['Solver'][0]['Status'].key == 'ok', "Solver returned non-optimally: {}".format(results)
return pd.Series(m.n.get_values(), index=L.index).astype(int)
|
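distribute_clusters solves a small integer quadratic program so that each (country, sub_network) group receives roughly its load share of the clusters, bounded by its bus count. Below is a solver-free, approximate sketch of that allocation with made-up weights; it is not the exact Pyomo program above, and the fix-up loops ignore the per-group bounds for brevity.

import pandas as pd

L = pd.Series({'DE': 0.5, 'FR': 0.3, 'BE': 0.2})  # hypothetical normalized load shares
N = pd.Series({'DE': 30, 'FR': 20, 'BE': 5})      # hypothetical bus counts per group
n_clusters = 10

# Start from the rounded proportional share, clipped to [1, N].
alloc = (L * n_clusters).round().clip(lower=1, upper=N).astype(int)

# Nudge the groups furthest from their target share until the total matches.
while alloc.sum() > n_clusters:
    alloc[(alloc - L * n_clusters).idxmax()] -= 1
while alloc.sum() < n_clusters:
    alloc[(L * n_clusters - alloc).idxmax()] += 1

print(alloc)        # DE 5, FR 3, BE 2
print(alloc.sum())  # 10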