id | original | modified |
---|---|---|
26,465 |
def get_new_command(command):
if "install" in command.script_parts and "composer require" in command.output.lower():
broken_cmd = "install"
new_cmd = "require"
else:
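# extract the unknown command and composer's suggested replacement from the error output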
broken_cmd = re.findall(r"Command \"([^']*)\" is not defined", command.output)[0]
new_cmd = re.findall(r'Did you mean this\?[^\n]*\n\s*([^\n]*)', command.output)
if not new_cmd:
new_cmd = re.findall(r'Did you mean one of these\?[^\n]*\n\s*([^\n]*)', command.output)
new_cmd = new_cmd[0].strip()
return replace_argument(command.script, broken_cmd, new_cmd)
|
def get_new_command(command):
if "install" in command.script_parts and "composer require" in command.output.lower():
broken_cmd = "install"
broken_cmd, new_cmd = "install", "require"
else:
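# extract the unknown command and composer's suggested replacement from the error output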
broken_cmd = re.findall(r"Command \"([^']*)\" is not defined", command.output)[0]
new_cmd = re.findall(r'Did you mean this\?[^\n]*\n\s*([^\n]*)', command.output)
if not new_cmd:
new_cmd = re.findall(r'Did you mean one of these\?[^\n]*\n\s*([^\n]*)', command.output)
new_cmd = new_cmd[0].strip()
return replace_argument(command.script, broken_cmd, new_cmd)
|
29,774 |
def save_converted_files(res, item_dicoms, bids_options, outtype, prefix, outname_bids, overwrite):
"""Copy converted files from tempdir to output directory.
Will rename files if necessary.
Parameters
----------
res : Node
Nipype conversion Node with results
item_dicoms : list of filenames
DICOMs converted
bids_options : list or None
If not None, save in BIDS format; the list may contain BIDS-specific options
prefix : string
Returns
-------
bids_outfiles
Converted BIDS files
"""
from nipype.interfaces.base import isdefined
prefix_dirname, prefix_basename = op.split(prefix)
bids_outfiles = []
res_files = res.outputs.converted_files
if not len(res_files):
lgr.debug("DICOMs {} were not converted".format(item_dicoms))
return
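# copy diffusion gradient tables (bvec/bval) next to the converted output, if the converter produced them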
if isdefined(res.outputs.bvecs) and isdefined(res.outputs.bvals):
outname_bvecs, outname_bvals = prefix + '.bvec', prefix + '.bval'
safe_copyfile(res.outputs.bvecs, outname_bvecs, overwrite)
safe_copyfile(res.outputs.bvals, outname_bvals, overwrite)
if isinstance(res_files, list):
res_files = sorted(res_files)
# we should provide specific handling for fmap,
# dwi etc which might spit out multiple files
suffixes = ([str(i+1) for i in range(len(res_files))]
if (bids_options is not None) else None)
if not suffixes:
lgr.warning("Following series files likely have "
"multiple (%d) volumes (orientations?) "
"generated: %s ...",
len(res_files), item_dicoms[0])
suffixes = [str(-i-1) for i in range(len(res_files))]
# Also copy BIDS files although they might need to
# be merged/postprocessed later
bids_files = (sorted(res.outputs.bids)
if len(res.outputs.bids) == len(res_files)
else [None] * len(res_files))
### Do we have a multi-echo series? ###
# Some Siemens sequences (e.g. CMRR's MB-EPI) set the label 'TE1',
# 'TE2', etc. in the 'ImageType' field. However, other seqs do not
# (e.g. MGH ME-MPRAGE). They do set an 'EchoNumber', but not for the
# first echo. To compound the problem, the echoes are NOT in order,
# so the first NIfTI file does not correspond to echo-1, etc. So, we
# need to know, beforehand, whether we are dealing with a multi-echo
# series. To do that, the most straightforward way is to read the
# echo times for all bids_files and see if they are all the same or not.
# Check for varying echo times
echo_times = sorted(list(set(
load_json(b).get('EchoTime', None)
for b in bids_files
if b
)))
is_multiecho = len(echo_times) > 1
### Loop through the bids_files, set the output name and save files
for fl, suffix, bids_file in zip(res_files, suffixes, bids_files):
# TODO: monitor conversion duration
if bids_file:
fileinfo = load_json(bids_file)
# set the prefix basename for this specific file (we'll modify it,
# and we don't want to modify it for all the bids_files):
this_prefix_basename = prefix_basename
# _sbref sequences reconstructing magnitude and phase generate
# two NIfTI files IN THE SAME SERIES, so we cannot just add
# the suffix, if we want to be bids compliant:
if bids_file and this_prefix_basename.endswith('_sbref'):
# Check to see if it is magnitude or phase reconstruction:
if 'M' in fileinfo.get('ImageType'):
mag_or_phase = 'magnitude'
elif 'P' in fileinfo.get('ImageType'):
mag_or_phase = 'phase'
else:
mag_or_phase = suffix
# Insert reconstruction label
if not ("_rec-%s" % mag_or_phase) in this_prefix_basename:
# If "_rec-" is specified, prepend the 'mag_or_phase' value.
if ('_rec-' in this_prefix_basename):
raise BIDSError(
"Reconstruction label for multi-echo single-band"
" reference images will be automatically set, remove"
" from heuristic"
)
# If not, insert "_rec-" + 'mag_or_phase' into the prefix_basename
# **before** "_run", "_echo" or "_sbref", whichever appears first:
for label in ['_run', '_echo', '_sbref']:
if (label in this_prefix_basename):
this_prefix_basename = this_prefix_basename.replace(
label, "_rec-%s%s" % (mag_or_phase, label)
)
break
# Now check if this run is multi-echo
# (Note: it can be _sbref and multiecho, so don't use "elif"):
# For multi-echo sequences, we have to specify the echo number in
# the file name:
if bids_file and is_multiecho:
# Get the EchoNumber from json file info. If not present, use EchoTime
if 'EchoNumber' in fileinfo.keys():
echo_number = fileinfo['EchoNumber']
else:
echo_number = echo_times.index(fileinfo['EchoTime']) + 1
supported_multiecho = ['_bold', '_epi', '_sbref', '_T1w', '_PDT2']
# Now, decide where to insert it.
# Insert it **before** the following string(s), whichever appears first.
for imgtype in supported_multiecho:
if (imgtype in this_prefix_basename):
this_prefix_basename = this_prefix_basename.replace(
imgtype, "_echo-%d%s" % (echo_number, imgtype)
)
break
# Fallback option:
# If we have failed to modify this_prefix_basename, because it didn't fall
# into any of the options above, just add the suffix at the end:
if this_prefix_basename == prefix_basename:
this_prefix_basename += suffix
# Finally, form the outname by stitching the directory and outtype:
outname = op.join(prefix_dirname, this_prefix_basename)
outfile = outname + '.' + outtype
# Write the files needed:
safe_copyfile(fl, outfile, overwrite)
if bids_file:
outname_bids_file = "%s.json" % (outname)
safe_copyfile(bids_file, outname_bids_file, overwrite)
bids_outfiles.append(outname_bids_file)
# res_files is not a list
else:
outname = "{}.{}".format(prefix, outtype)
safe_copyfile(res_files, outname, overwrite)
if isdefined(res.outputs.bids):
try:
safe_copyfile(res.outputs.bids, outname_bids, overwrite)
bids_outfiles.append(outname_bids)
except TypeError as exc: ##catch lists
raise TypeError("Multiple BIDS sidecars detected.")
return bids_outfiles
|
def save_converted_files(res, item_dicoms, bids_options, outtype, prefix, outname_bids, overwrite):
"""Copy converted files from tempdir to output directory.
Will rename files if necessary.
Parameters
----------
res : Node
Nipype conversion Node with results
item_dicoms : list of filenames
DICOMs converted
bids_options : list or None
If not None, save in BIDS format; the list may contain BIDS-specific options
prefix : string
Returns
-------
bids_outfiles
Converted BIDS files
"""
from nipype.interfaces.base import isdefined
prefix_dirname, prefix_basename = op.split(prefix)
bids_outfiles = []
res_files = res.outputs.converted_files
if not len(res_files):
lgr.debug("DICOMs {} were not converted".format(item_dicoms))
return
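# copy diffusion gradient tables (bvec/bval) next to the converted output, if the converter produced them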
if isdefined(res.outputs.bvecs) and isdefined(res.outputs.bvals):
outname_bvecs, outname_bvals = prefix + '.bvec', prefix + '.bval'
safe_copyfile(res.outputs.bvecs, outname_bvecs, overwrite)
safe_copyfile(res.outputs.bvals, outname_bvals, overwrite)
if isinstance(res_files, list):
res_files = sorted(res_files)
# we should provide specific handling for fmap,
# dwi etc which might spit out multiple files
suffixes = ([str(i+1) for i in range(len(res_files))]
if (bids_options is not None) else None)
if not suffixes:
lgr.warning("Following series files likely have "
"multiple (%d) volumes (orientations?) "
"generated: %s ...",
len(res_files), item_dicoms[0])
suffixes = [str(-i-1) for i in range(len(res_files))]
# Also copy BIDS files although they might need to
# be merged/postprocessed later
bids_files = (sorted(res.outputs.bids)
if len(res.outputs.bids) == len(res_files)
else [None] * len(res_files))
### Do we have a multi-echo series? ###
# Some Siemens sequences (e.g. CMRR's MB-EPI) set the label 'TE1',
# 'TE2', etc. in the 'ImageType' field. However, other seqs do not
# (e.g. MGH ME-MPRAGE). They do set an 'EchoNumber', but not for the
# first echo. To compound the problem, the echoes are NOT in order,
# so the first NIfTI file does not correspond to echo-1, etc. So, we
# need to know, beforehand, whether we are dealing with a multi-echo
# series. To do that, the most straightforward way is to read the
# echo times for all bids_files and see if they are all the same or not.
# Check for varying echo times
echo_times = sorted(list(set(
load_json(b).get('EchoTime', None)
for b in bids_files
if b
)))
is_multiecho = len(echo_times) > 1
### Loop through the bids_files, set the output name and save files
for fl, suffix, bids_file in zip(res_files, suffixes, bids_files):
# TODO: monitor conversion duration
if bids_file:
fileinfo = load_json(bids_file)
# set the prefix basename for this specific file (we'll modify it,
# and we don't want to modify it for all the bids_files):
this_prefix_basename = prefix_basename
# _sbref sequences reconstructing magnitude and phase generate
# two NIfTI files IN THE SAME SERIES, so we cannot just add
# the suffix, if we want to be bids compliant:
if bids_file and this_prefix_basename.endswith('_sbref'):
# Check to see if it is magnitude or phase reconstruction:
if 'M' in fileinfo.get('ImageType'):
mag_or_phase = 'magnitude'
elif 'P' in fileinfo.get('ImageType'):
mag_or_phase = 'phase'
else:
mag_or_phase = suffix
# Insert reconstruction label
if not ("_rec-%s" % mag_or_phase) in this_prefix_basename:
# If "_rec-" is specified, prepend the 'mag_or_phase' value.
if ('_rec-' in this_prefix_basename):
raise BIDSError(
"Reconstruction label for multi-echo single-band"
" reference images will be automatically set, remove"
" from heuristic"
)
# If not, insert "_rec-" + 'mag_or_phase' into the prefix_basename
# **before** "_run", "_echo" or "_sbref", whichever appears first:
for label in ['_run', '_echo', '_sbref']:
if (label in this_prefix_basename):
this_prefix_basename = this_prefix_basename.replace(
label, "_rec-%s%s" % (mag_or_phase, label)
)
break
# Now check if this run is multi-echo
# (Note: it can be _sbref and multiecho, so don't use "elif"):
# For multi-echo sequences, we have to specify the echo number in
# the file name:
if bids_file and is_multiecho:
# Get the EchoNumber from json file info. If not present, use EchoTime
if 'EchoNumber' in fileinfo.keys():
echo_number = fileinfo['EchoNumber']
elif fileinfo.get('EchoTime'):
echo_number = echo_times.index(fileinfo['EchoTime']) + 1
supported_multiecho = ['_bold', '_epi', '_sbref', '_T1w', '_PDT2']
# Now, decide where to insert it.
# Insert it **before** the following string(s), whichever appears first.
for imgtype in supported_multiecho:
if (imgtype in this_prefix_basename):
this_prefix_basename = this_prefix_basename.replace(
imgtype, "_echo-%d%s" % (echo_number, imgtype)
)
break
# Fallback option:
# If we have failed to modify this_prefix_basename, because it didn't fall
# into any of the options above, just add the suffix at the end:
if this_prefix_basename == prefix_basename:
this_prefix_basename += suffix
# Finally, form the outname by stitching the directory and outtype:
outname = op.join(prefix_dirname, this_prefix_basename)
outfile = outname + '.' + outtype
# Write the files needed:
safe_copyfile(fl, outfile, overwrite)
if bids_file:
outname_bids_file = "%s.json" % (outname)
safe_copyfile(bids_file, outname_bids_file, overwrite)
bids_outfiles.append(outname_bids_file)
# res_files is not a list
else:
outname = "{}.{}".format(prefix, outtype)
safe_copyfile(res_files, outname, overwrite)
if isdefined(res.outputs.bids):
try:
safe_copyfile(res.outputs.bids, outname_bids, overwrite)
bids_outfiles.append(outname_bids)
except TypeError as exc: ##catch lists
raise TypeError("Multiple BIDS sidecars detected.")
return bids_outfiles
|
17,461 |
def broadcast_compat_data(
variable: Variable,
broadcast_dims: Tuple[Hashable, ...],
core_dims: Tuple[Hashable, ...],
) -> Any:
data = variable.data
old_dims = variable.dims
new_dims = broadcast_dims + core_dims
if new_dims == old_dims:
# optimize for the typical case
return data
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError(
"operand to apply_ufunc has required core dimensions {}, but "
"some of these dimensions are absent on an input variable: {}".format(
list(core_dims), missing_core_dims
)
)
set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
if unexpected_dims:
raise ValueError(
"operand to apply_ufunc encountered unexpected "
f"dimensions {unexpected_dims!r} on an input variable: these are core "
"dimensions on other input or output variables"
)
# for consistency with numpy, keep broadcast dimensions to the left
old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
reordered_dims = old_broadcast_dims + core_dims
if reordered_dims != old_dims:
order = tuple(old_dims.index(d) for d in reordered_dims)
data = duck_array_ops.transpose(data, order)
if new_dims != reordered_dims:
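# build an indexing key that inserts new axes for broadcast dims the variable lacks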
key_parts: list[Union[slice, None]] = []
for dim in new_dims:
if dim in set_old_dims:
key_parts.append(SLICE_NONE)
elif key_parts:
# no need to insert new axes at the beginning that are already
# handled by broadcasting
key_parts.append(np.newaxis)
data = data[tuple(key_parts)]
return data
|
def broadcast_compat_data(
variable: Variable,
broadcast_dims: Tuple[Hashable, ...],
core_dims: Tuple[Hashable, ...],
) -> Any:
data = variable.data
old_dims = variable.dims
new_dims = broadcast_dims + core_dims
if new_dims == old_dims:
# optimize for the typical case
return data
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError(
"operand to apply_ufunc has required core dimensions {}, but "
"some of these dimensions are absent on an input variable: {}".format(
list(core_dims), missing_core_dims
)
)
set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
if unexpected_dims:
raise ValueError(
"operand to apply_ufunc encountered unexpected "
f"dimensions {unexpected_dims!r} on an input variable: these are core "
"dimensions on other input or output variables"
)
# for consistency with numpy, keep broadcast dimensions to the left
old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
reordered_dims = old_broadcast_dims + core_dims
if reordered_dims != old_dims:
order = tuple(old_dims.index(d) for d in reordered_dims)
data = duck_array_ops.transpose(data, order)
if new_dims != reordered_dims:
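# build an indexing key that inserts new axes for broadcast dims the variable lacks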
key_parts: List[Optional[slice]] = []
for dim in new_dims:
if dim in set_old_dims:
key_parts.append(SLICE_NONE)
elif key_parts:
# no need to insert new axes at the beginning that are already
# handled by broadcasting
key_parts.append(np.newaxis)
data = data[tuple(key_parts)]
return data
|
28,062 |
def call_command(cmd, cwd, env):
"""
Execute a process in a test case. If the run is successful do not bloat
the test output, but in case of any failure dump stdout and stderr.
Returns an (stdout, stderr, returncode) triple.
"""
def show(out, err):
print("\nTEST execute stdout:\n")
print(out)
print("\nTEST execute stderr:\n")
print(err)
cmd_log = ' '.join([shlex.quote(x) for x in cmd])
try:
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env,
encoding="utf-8",
errors="ignore")
out, err = proc.communicate()
if proc.returncode != 0:
show(out, err)
print('Unsuccessful run: ' + cmd_log)
print(proc.returncode)
return out, err, proc.returncode
except OSError as oerr:
print(oerr)
show(out, err)
print('Failed to run: ' + cmd_log)
raise
|
def call_command(cmd, cwd, env):
"""
Execute a process in a test case. If the run is successful do not bloat
the test output, but in case of any failure dump stdout and stderr.
Returns an (stdout, stderr, returncode) triple.
"""
def show(out, err):
print("\nTEST execute stdout:\n")
print(out)
print("\nTEST execute stderr:\n")
print(err)
cmd_log = ' '.join([shlex.quote(x) for x in cmd])
try:
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env,
encoding="utf-8",
errors="ignore")
out, err = proc.communicate()
if proc.returncode != 0:
show(out, err)
print(f'Unsuccessful run: {cmd_log}')
print(proc.returncode)
return out, err, proc.returncode
except OSError as oerr:
print(oerr)
show(out, err)
print('Failed to run: ' + cmd_log)
raise
|
52,460 |
def test_underscore_for_span(en_tokenizer):
Doc.set_extension(name='doc_extension', default=None)
Span.set_extension(name='span_extension', default=None)
Token.set_extension(name='token_extension', default=None)
text = 'Hello, world!'
doc = en_tokenizer(text)
span_1 = Span(doc, 0, 2, 'SPAN_1')
span_2 = Span(doc, 0, 2, 'SPAN_2')
doc._.doc_extension = "doc extension"
doc[0]._.token_extension = "token extension"
span_1._.span_extension = 'span_1 extension'
span_2._.span_extension = 'span_2 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'span_2 extension'
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc.user_data[('._.', 'span_extension', span_2.start_char, span_2.end_char, span_2.label, span_2.kb_id)] == 'span_2 extension'
span_1.label_ = "NEW_LABEL"
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc.user_data[('._.', 'span_extension', span_2.start_char, span_2.end_char, span_2.label, span_2.kb_id)] == 'span_2 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'span_2 extension'
span_1.kb_id_ = "KB_ID"
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc.user_data[('._.', 'span_extension', span_2.start_char, span_2.end_char, span_2.label, span_2.kb_id)] == 'span_2 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'span_2 extension'
assert doc.user_data[('._.', 'doc_extension', None, None)] == 'doc extension'
assert doc.user_data[('._.', 'token_extension', 0, None)] == 'token extension'
span_2._.span_extension = 'updated span_2 extension'
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc.user_data[('._.', 'span_extension', span_2.start_char, span_2.end_char, span_2.label, span_2.kb_id)] == 'updated span_2 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'updated span_2 extension'
|
def test_underscore_for_span(en_tokenizer):
Doc.set_extension(name='doc_extension', default=None)
Span.set_extension(name='span_extension', default=None)
Token.set_extension(name='token_extension', default=None)
text = 'Hello, world!'
doc = en_tokenizer(text)
span_1 = Span(doc, 0, 2, 'SPAN_1')
span_2 = Span(doc, 0, 2, 'SPAN_2')
doc._.doc_extension = "doc extension"
doc[0]._.token_extension = "token extension"
span_1._.span_extension = 'span_1 extension'
span_2._.span_extension = 'span_2 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'span_2 extension'
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc.user_data[('._.', 'span_extension', span_2.start_char, span_2.end_char, span_2.label, span_2.kb_id)] == 'span_2 extension'
span_1.label_ = "NEW_LABEL"
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'span_2 extension'
span_1.kb_id_ = "KB_ID"
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc.user_data[('._.', 'span_extension', span_2.start_char, span_2.end_char, span_2.label, span_2.kb_id)] == 'span_2 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'span_2 extension'
assert doc.user_data[('._.', 'doc_extension', None, None)] == 'doc extension'
assert doc.user_data[('._.', 'token_extension', 0, None)] == 'token extension'
span_2._.span_extension = 'updated span_2 extension'
assert doc.user_data[('._.', 'span_extension', span_1.start_char, span_1.end_char, span_1.label, span_1.kb_id)] == 'span_1 extension'
assert doc.user_data[('._.', 'span_extension', span_2.start_char, span_2.end_char, span_2.label, span_2.kb_id)] == 'updated span_2 extension'
assert doc._.doc_extension == "doc extension"
assert doc[0]._.token_extension == "token extension"
assert span_1._.span_extension == 'span_1 extension'
assert span_2._.span_extension == 'updated span_2 extension'
|
6,050 |
def executeJob(executableFile, proxy, taskID, **kwargs):
""" wrapper around ce.submitJob: decides which CE to use (Sudo or InProcess)
:param str proxy: proxy file location to be used for job submission
:param int taskID: local task ID of the PoolCE
:return:
"""
useSudo = kwargs.pop('UseSudo', False)
if useSudo:
ce = SudoComputingElement("Task-" + str(taskID))
payloadUser = kwargs.get('PayloadUser')
if payloadUser:
ce.setParameters({'PayloadUser': payloadUser})
else:
ce = InProcessComputingElement("Task-" + str(taskID))
return ce.submitJob(executableFile, proxy)
|
def executeJob(executableFile, proxy, taskID, **kwargs):
""" wrapper around ce.submitJob: decides which CE to use (Sudo or InProcess)
:param str executableFile: location of the executable file
:param str proxy: proxy file location to be used for job submission
:param int taskID: local task ID of the PoolCE
:return:
"""
useSudo = kwargs.pop('UseSudo', False)
if useSudo:
ce = SudoComputingElement("Task-" + str(taskID))
payloadUser = kwargs.get('PayloadUser')
if payloadUser:
ce.setParameters({'PayloadUser': payloadUser})
else:
ce = InProcessComputingElement("Task-" + str(taskID))
return ce.submitJob(executableFile, proxy)
|
40,540 |
def add_fd_backend(cmd, resource_group_name, front_door_name, backend_pool_name, address,
http_port=80, https_port=443, disabled=None, priority=1, weight=50,
backend_host_header=None, private_link_alias=None, private_link_resource_id=None,
private_link_location=None, private_link_approval_message=None):
from azext_front_door.vendored_sdks.models import Backend
backend = Backend(
address=address,
http_port=http_port,
https_port=https_port,
enabled_state='Disabled' if disabled else 'Enabled',
priority=priority,
weight=weight,
backend_host_header=address if backend_host_header==None else backend_host_header,
private_link_alias=private_link_alias,
private_link_resource_id=private_link_resource_id,
private_link_location=private_link_location,
private_link_approval_message=private_link_approval_message
)
client = cf_frontdoor(cmd.cli_ctx, None)
frontdoor = client.get(resource_group_name, front_door_name)
backend_pool = next((x for x in frontdoor.backend_pools if x.name == backend_pool_name), None)
if not backend_pool:
from knack.util import CLIError
raise CLIError("Backend pool '{}' could not be found on frontdoor '{}'".format(
backend_pool_name, front_door_name))
backend_pool.backends.append(backend)
client.begin_create_or_update(resource_group_name, front_door_name, frontdoor).result()
return backend
|
def add_fd_backend(cmd, resource_group_name, front_door_name, backend_pool_name, address,
http_port=80, https_port=443, disabled=None, priority=1, weight=50,
backend_host_header=None, private_link_alias=None, private_link_resource_id=None,
private_link_location=None, private_link_approval_message=None):
from azext_front_door.vendored_sdks.models import Backend
backend = Backend(
address=address,
http_port=http_port,
https_port=https_port,
enabled_state='Disabled' if disabled else 'Enabled',
priority=priority,
weight=weight,
backend_host_header=address if backend_host_header is None else backend_host_header,
private_link_alias=private_link_alias,
private_link_resource_id=private_link_resource_id,
private_link_location=private_link_location,
private_link_approval_message=private_link_approval_message
)
client = cf_frontdoor(cmd.cli_ctx, None)
frontdoor = client.get(resource_group_name, front_door_name)
backend_pool = next((x for x in frontdoor.backend_pools if x.name == backend_pool_name), None)
if not backend_pool:
from knack.util import CLIError
raise CLIError("Backend pool '{}' could not be found on frontdoor '{}'".format(
backend_pool_name, front_door_name))
backend_pool.backends.append(backend)
client.begin_create_or_update(resource_group_name, front_door_name, frontdoor).result()
return backend
|
30,694 |
def test_module(client: Client, *_) -> Tuple[Any, Dict[Any, Any], Dict[Any, Any], bool]:
# Validate fetch_time parameter is valid (if not, parse_date_range will raise the error message)
parse_date_range(client.fetch_time, '%Y-%m-%d %H:%M:%S')
result = client.send_request(f'table/{client.ticket_type}?sysparm_limit=1', 'GET')
if 'result' not in result:
raise Exception('ServiceNow error: ' + str(result))
ticket = result.get('result')
if ticket and demisto.params().get('isFetch'):
if isinstance(ticket, list):
ticket = ticket[0]
if client.timestamp_field not in ticket:
raise ValueError(f"The timestamp field [{client.timestamp_field}] does not exist in the ticket.")
if client.incident_name not in ticket:
raise ValueError(f"The field [{client.incident_name}] does not exist in the ticket.")
return 'ok', {}, {}, True
|
def test_module(client: Client, *_) -> tuple:
# Validate fetch_time parameter is valid (if not, parse_date_range will raise the error message)
parse_date_range(client.fetch_time, '%Y-%m-%d %H:%M:%S')
result = client.send_request(f'table/{client.ticket_type}?sysparm_limit=1', 'GET')
if 'result' not in result:
raise Exception('ServiceNow error: ' + str(result))
ticket = result.get('result')
if ticket and demisto.params().get('isFetch'):
if isinstance(ticket, list):
ticket = ticket[0]
if client.timestamp_field not in ticket:
raise ValueError(f"The timestamp field [{client.timestamp_field}] does not exist in the ticket.")
if client.incident_name not in ticket:
raise ValueError(f"The field [{client.incident_name}] does not exist in the ticket.")
return 'ok', {}, {}, True
|
47,930 |
def main():
parser = argparse.ArgumentParser(description='Whiteboard inpainting demo')
parser.add_argument('-i', '--input', type=str, required=True,
help='Input sources (index of camera or path to a video file)')
parser.add_argument('-loop', '--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop')
parser.add_argument('-m_i', '--m_instance_segmentation', type=str, required=False,
help='Path to the instance segmentation model')
parser.add_argument('-m_s', '--m_semantic_segmentation', type=str, required=False,
help='Path to the semantic segmentation model')
parser.add_argument('-t', '--threshold', type=float, default=0.6,
help='Threshold for person instance segmentation model')
parser.add_argument('--output_video', type=str, default='', required=False,
help='Optional. Path to output video')
parser.add_argument("--no_show", help="Optional. Don't show output", action='store_true')
parser.add_argument('-d', '--device', type=str, default='CPU',
help='Optional. Specify a target device to infer on. CPU, GPU, FPGA, HDDL or MYRIAD is '
'acceptable. The demo will look for a suitable plugin for the device specified')
parser.add_argument('-l', '--cpu_extension', type=str, default=None,
help='MKLDNN (CPU)-targeted custom layers. Absolute \
path to a shared library with the kernels impl.')
parser.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially')
args = parser.parse_args()
cap = open_images_capture(args.input, args.loop)
frame = cap.read()
if frame is None:
raise RuntimeError("Can't read an image from the input")
if bool(args.m_instance_segmentation) == bool(args.m_semantic_segmentation):
raise ValueError('Set up exactly one of segmentation models: '\
'--m_instance_segmentation or --m_semantic_segmentation')
frame_size = frame.shape
fps = cap.fps()
out_frame_size = (int(frame_size[1]), int(frame_size[0] * 2))
presenter = monitors.Presenter(args.utilization_monitors, 20,
(out_frame_size[0] // 4, out_frame_size[1] // 16))
root_dir = osp.dirname(osp.abspath(__file__))
mouse = MouseClick()
if not args.no_show:
cv2.namedWindow(WINNAME)
cv2.setMouseCallback(WINNAME, mouse.get_points)
if args.output_video:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output_video = cv2.VideoWriter(args.output_video, fourcc, fps, out_frame_size)
else:
output_video = None
log.info("Initializing Inference Engine")
ie = IECore()
if args.m_instance_segmentation:
labels_file = osp.join(root_dir, 'coco_labels.txt')
segmentation = MaskRCNN(ie, args.m_instance_segmentation, labels_file,
args.threshold, args.device, args.cpu_extension)
elif args.m_semantic_segmentation:
labels_file = osp.join(root_dir, 'cityscapes_labels.txt')
segmentation = SemanticSegmentation(ie, args.m_semantic_segmentation, labels_file,
args.threshold, args.device, args.cpu_extension)
black_board = False
output_frame = np.full((frame_size[1], frame_size[0], 3), 255, dtype='uint8')
frame_number = 0
key = -1
while frame is not None:
start = time.time()
mask = None
detections = segmentation.get_detections([frame])
expand_mask(detections, frame_size[0] // 27)
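# merge all detected instance masks into a single foreground (person) mask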
if len(detections[0]) > 0:
mask = detections[0][0][2]
for i in range(1, len(detections[0])):
mask = cv2.bitwise_or(mask, detections[0][i][2])
if mask is not None:
mask = np.stack([mask, mask, mask], axis=-1)
else:
mask = np.zeros(frame.shape, dtype='uint8')
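# keep the previously accumulated board content under the person mask; refresh the rest from the cleaned frame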
clear_frame = remove_background(frame, invert_colors=not black_board)
output_frame = np.where(mask, output_frame, clear_frame)
merged_frame = np.vstack([frame, output_frame])
merged_frame = cv2.resize(merged_frame, out_frame_size)
if output_video is not None:
output_video.write(merged_frame)
presenter.drawGraphs(merged_frame)
if not args.no_show:
cv2.imshow(WINNAME, merged_frame)
key = check_pressed_keys(key)
if key == 27: # 'Esc'
break
if key == ord('i'): # catch pressing of key 'i'
black_board = not black_board
output_frame = 255 - output_frame
else:
presenter.handleKey(key)
if mouse.crop_available:
x0, x1 = min(mouse.points[0][0], mouse.points[1][0]), \
max(mouse.points[0][0], mouse.points[1][0])
y0, y1 = min(mouse.points[0][1], mouse.points[1][1]), \
max(mouse.points[0][1], mouse.points[1][1])
x1, y1 = min(x1, output_frame.shape[1] - 1), min(y1, output_frame.shape[0] - 1)
board = output_frame[y0: y1, x0: x1, :]
if board.shape[0] > 0 and board.shape[1] > 0:
cv2.namedWindow('Board', cv2.WINDOW_KEEPRATIO)
cv2.imshow('Board', board)
end = time.time()
print('\rProcessing frame: {}, fps = {:.3}' \
.format(frame_number, 1. / (end - start)), end="")
frame_number += 1
frame = cap.read()
print('')
log.info(presenter.reportMeans())
if output_video is not None:
output_video.release()
|
def main():
parser = argparse.ArgumentParser(description='Whiteboard inpainting demo')
parser.add_argument('-i', '--input', type=str, required=True,
help='Input sources (index of camera or path to a video file)')
parser.add_argument('-loop', '--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop')
parser.add_argument('-m_i', '--m_instance_segmentation', type=str, required=False,
help='Path to the instance segmentation model')
parser.add_argument('-m_s', '--m_semantic_segmentation', type=str, required=False,
help='Path to the semantic segmentation model')
parser.add_argument('-t', '--threshold', type=float, default=0.6,
help='Threshold for person instance segmentation model')
parser.add_argument('--output_video', type=str, default='', required=False,
help='Optional. Path to output video')
parser.add_argument("--no_show", help="Optional. Don't show output", action='store_true')
parser.add_argument('-d', '--device', type=str, default='CPU',
help='Optional. Specify a target device to infer on. CPU, GPU, FPGA, HDDL or MYRIAD is '
'acceptable. The demo will look for a suitable plugin for the device specified')
parser.add_argument('-l', '--cpu_extension', type=str, default=None,
help='MKLDNN (CPU)-targeted custom layers. Absolute \
path to a shared library with the kernels impl.')
parser.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially')
args = parser.parse_args()
cap = open_images_capture(args.input, args.loop)
frame = cap.read()
if frame is None:
raise RuntimeError("Can't read an image from the input")
if bool(args.m_instance_segmentation) == bool(args.m_semantic_segmentation):
raise ValueError('Set up exactly one of segmentation models: '\
'--m_instance_segmentation or --m_semantic_segmentation')
frame_size = frame.shape
fps = cap.fps()
out_frame_size = (frame_size[1], frame_size[0] * 2)
presenter = monitors.Presenter(args.utilization_monitors, 20,
(out_frame_size[0] // 4, out_frame_size[1] // 16))
root_dir = osp.dirname(osp.abspath(__file__))
mouse = MouseClick()
if not args.no_show:
cv2.namedWindow(WINNAME)
cv2.setMouseCallback(WINNAME, mouse.get_points)
if args.output_video:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output_video = cv2.VideoWriter(args.output_video, fourcc, fps, out_frame_size)
else:
output_video = None
log.info("Initializing Inference Engine")
ie = IECore()
if args.m_instance_segmentation:
labels_file = osp.join(root_dir, 'coco_labels.txt')
segmentation = MaskRCNN(ie, args.m_instance_segmentation, labels_file,
args.threshold, args.device, args.cpu_extension)
elif args.m_semantic_segmentation:
labels_file = osp.join(root_dir, 'cityscapes_labels.txt')
segmentation = SemanticSegmentation(ie, args.m_semantic_segmentation, labels_file,
args.threshold, args.device, args.cpu_extension)
black_board = False
output_frame = np.full((frame_size[1], frame_size[0], 3), 255, dtype='uint8')
frame_number = 0
key = -1
while frame is not None:
start = time.time()
mask = None
detections = segmentation.get_detections([frame])
expand_mask(detections, frame_size[0] // 27)
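# merge all detected instance masks into a single foreground (person) mask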
if len(detections[0]) > 0:
mask = detections[0][0][2]
for i in range(1, len(detections[0])):
mask = cv2.bitwise_or(mask, detections[0][i][2])
if mask is not None:
mask = np.stack([mask, mask, mask], axis=-1)
else:
mask = np.zeros(frame.shape, dtype='uint8')
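# keep the previously accumulated board content under the person mask; refresh the rest from the cleaned frame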
clear_frame = remove_background(frame, invert_colors=not black_board)
output_frame = np.where(mask, output_frame, clear_frame)
merged_frame = np.vstack([frame, output_frame])
merged_frame = cv2.resize(merged_frame, out_frame_size)
if output_video is not None:
output_video.write(merged_frame)
presenter.drawGraphs(merged_frame)
if not args.no_show:
cv2.imshow(WINNAME, merged_frame)
key = check_pressed_keys(key)
if key == 27: # 'Esc'
break
if key == ord('i'): # catch pressing of key 'i'
black_board = not black_board
output_frame = 255 - output_frame
else:
presenter.handleKey(key)
if mouse.crop_available:
x0, x1 = min(mouse.points[0][0], mouse.points[1][0]), \
max(mouse.points[0][0], mouse.points[1][0])
y0, y1 = min(mouse.points[0][1], mouse.points[1][1]), \
max(mouse.points[0][1], mouse.points[1][1])
x1, y1 = min(x1, output_frame.shape[1] - 1), min(y1, output_frame.shape[0] - 1)
board = output_frame[y0: y1, x0: x1, :]
if board.shape[0] > 0 and board.shape[1] > 0:
cv2.namedWindow('Board', cv2.WINDOW_KEEPRATIO)
cv2.imshow('Board', board)
end = time.time()
print('\rProcessing frame: {}, fps = {:.3}' \
.format(frame_number, 1. / (end - start)), end="")
frame_number += 1
frame = cap.read()
print('')
log.info(presenter.reportMeans())
if output_video is not None:
output_video.release()
|
17,388 |
def format_item(x, timedelta_format=None, quote_strings=True):
"""Returns a succinct summary of an object as a string"""
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return repr(x) if quote_strings else x
elif isinstance(x, (float, np.float_)):
return f"{x:.4}"
else:
return str(x)
|
def format_item(x, timedelta_format=None, quote_strings=True):
"""Returns a succinct summary of an object as a string"""
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return repr(x) if quote_strings else x
elif isinstance(x, float):
return f"{x:.4}"
else:
return str(x)
|
31,805 |
def fetch_indicators(client: Client,
tags: List[str],
attribute_type: List[str],
query: Optional[str],
tlp_color: Optional[str],
url: Optional[str],
limit: int = -1) -> List[Dict]:
if query:
params_dict = clean_user_query(query)
else:
params_dict = build_params_dict(tags, attribute_type)
response = client.search_query(params_dict)
indicators_iterator = build_indicators_iterator(response, url)
indicators = []
if limit > 0:
indicators_iterator = indicators_iterator[:limit]
for indicator in indicators_iterator:
value_ = indicator['value']['value']
type_ = indicator['type']
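# 'raw_type' is stripped from the raw indicator here and passed separately to update_indicator_fields below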
raw_type = indicator.pop('raw_type')
raw_data = {
'value': value_,
'type': type_,
}
for key, value in indicator.items():
raw_data.update({key: value})
indicator_obj = build_indicator(value_, type_, raw_data)
update_indicator_fields(indicator_obj, tlp_color, raw_type)
galaxy_indicators = build_indicators_from_galaxies(indicator_obj)
indicators.extend(galaxy_indicators)
create_and_add_relationships(indicator_obj, galaxy_indicators)
indicators.append(indicator_obj)
return indicators
|
def fetch_indicators(client: Client,
tags: List[str],
attribute_type: List[str],
query: Optional[str],
tlp_color: Optional[str],
url: Optional[str],
limit: int = -1) -> List[Dict]:
if query:
params_dict = clean_user_query(query)
else:
params_dict = build_params_dict(tags, attribute_type)
response = client.search_query(params_dict)
indicators_iterator = build_indicators_iterator(response, url)
indicators = []
if limit > 0:
indicators_iterator = indicators_iterator[:limit]
for indicator in indicators_iterator:
value_ = indicator['value']['value']
type_ = indicator['type']
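# 'raw_type' is stripped from the raw indicator here and passed separately to update_indicator_fields below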
raw_type = indicator.pop('raw_type')
raw_data = {
'value': value_,
'type': type_,
}
raw_data.update(indicator)
indicator_obj = build_indicator(value_, type_, raw_data)
update_indicator_fields(indicator_obj, tlp_color, raw_type)
galaxy_indicators = build_indicators_from_galaxies(indicator_obj)
indicators.extend(galaxy_indicators)
create_and_add_relationships(indicator_obj, galaxy_indicators)
indicators.append(indicator_obj)
return indicators
|
5,632 |
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba', for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 17 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2', fs=fs)
|
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba' for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 17 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2', fs=fs)
|
291 |
def sample_smc(
draws=1000,
kernel="metropolis",
n_steps=25,
parallel=False,
start=None,
cores=None,
tune_scaling=True,
tune_steps=True,
scaling=1.0,
p_acc_rate=0.99,
threshold=0.5,
epsilon=1.0,
dist_func="absolute_error",
sum_stat=False,
progressbar=False,
model=None,
random_seed=-1,
):
"""
Sequential Monte Carlo based sampling
Parameters
----------
draws : int
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 1000.
kernel : str
Kernel method for the SMC sampler. Available options are ``metropolis`` (default) and `ABC`.
Use `ABC` for likelihood-free inference together with a ``pm.Simulator``.
n_steps : int
The number of steps of each Markov Chain. If ``tune_steps == True`` ``n_steps`` will be used
for the first stage and for the others it will be determined automatically based on the
acceptance rate and `p_acc_rate`, the max number of steps is ``n_steps``.
parallel : bool
Distribute computations across cores if the number of cores is larger than 1.
Defaults to False.
start : dict, or array of dict
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
cores : int
The number of chains to run in parallel. If ``None`` (default), it will be automatically
set to the number of CPUs in the system.
tune_scaling : bool
Whether to compute the scaling factor automatically or not. Defaults to True
tune_steps : bool
Whether to compute the number of steps automatically or not. Defaults to True
scaling : float
Scaling factor applied to the proposal distribution i.e. the step size of the Markov Chain.
If ``tune_scaling == True`` (default) it will be determined automatically at each stage.
p_acc_rate : float
Used to compute ``n_steps`` when ``tune_steps == True``. The higher the value of
``p_acc_rate`` the higher the number of steps computed automatically. Defaults to 0.99.
It should be between 0 and 1.
threshold : float
Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
epsilon : float
Standard deviation of the gaussian pseudo likelihood. Only works with `kernel = ABC`
dist_func : str
Distance function. Available options are ``absolute_error`` (default) and
``sum_of_squared_distance``. Only works with ``kernel = ABC``
sum_stat : bool
Whether to use or not a summary statistics. Defaults to False. Only works with
``kernel = ABC``
progressbar : bool
Flag for displaying a progress bar. Defaults to False.
model : Model (optional if in ``with`` context)).
random_seed : int
random seed
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution.
So in more general terms we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0` the
tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equal to some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the covariance for the proposal distribution.
7. For stages other than 0 use the acceptance rate from the previous stage to estimate the
scaling of the proposal distribution and `n_steps`.
8. Run N Metropolis chains (each one of length `n_steps`), starting each one from a different
sample in :math:`S_{w}`.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
Geophysical Journal International, 2013, 194(3), pp.1701-1726,
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J. and Chen, Y. (2007).
Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
model = modelcontext(model)
if random_seed != -1:
np.random.seed(random_seed)
if cores is None:
cores = _cpu_count()
beta = 0
stage = 0
accepted = 0
acc_rate = 1.0
max_steps = n_steps
proposed = draws * n_steps
model.marginal_likelihood = 1
variables = inputvars(model.vars)
discrete = np.concatenate([[v.dtype in pm.discrete_types] * (v.dsize or 1) for v in variables])
any_discrete = discrete.any()
all_discrete = discrete.all()
shared = make_shared_replacements(variables, model)
prior_logp = logp_forw([model.varlogpt], variables, shared)
pm._log.info("Sample initial stage: ...")
posterior, var_info = _initial_population(draws, model, variables, start)
if kernel.lower() == "abc":
warnings.warn(EXPERIMENTAL_WARNING)
simulator = model.observed_RVs[0]
likelihood_logp = PseudoLikelihood(
epsilon,
simulator.observations,
simulator.distribution.function,
model,
var_info,
dist_func,
sum_stat,
)
elif kernel.lower() == "metropolis":
likelihood_logp = logp_forw([model.datalogpt], variables, shared)
if parallel and cores > 1:
pool = mp.Pool(processes=cores)
while beta < 1:
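# evaluate the likelihood of every particle in the current population (optionally in parallel)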
if parallel and cores > 1:
results = pool.starmap(likelihood_logp, [(sample,) for sample in posterior])
else:
results = [likelihood_logp(sample) for sample in posterior]
likelihoods = np.array(results).squeeze()
beta, old_beta, weights, sj = calc_beta(beta, likelihoods, threshold)
model.marginal_likelihood *= sj
# resample based on plausibility weights (selection)
resampling_indexes = np.random.choice(np.arange(draws), size=draws, p=weights)
posterior = posterior[resampling_indexes]
likelihoods = likelihoods[resampling_indexes]
# compute proposal distribution based on weights
covariance = _calc_covariance(posterior, weights)
proposal = MultivariateNormalProposal(covariance)
# compute scaling (optional) and number of Markov chains steps (optional), based on the
# acceptance rate of the previous stage
if (tune_scaling or tune_steps) and stage > 0:
scaling, n_steps = _tune(
acc_rate, proposed, tune_scaling, tune_steps, scaling, max_steps, p_acc_rate
)
pm._log.info("Stage: {:3d} Beta: {:.3f} Steps: {:3d}".format(stage, beta, n_steps))
# Apply Metropolis kernel (mutation)
proposed = draws * n_steps
priors = np.array([prior_logp(sample) for sample in posterior]).squeeze()
tempered_logp = priors + likelihoods * beta
parameters = (
proposal,
scaling,
accepted,
any_discrete,
all_discrete,
discrete,
n_steps,
prior_logp,
likelihood_logp,
beta,
)
if parallel and cores > 1:
results = pool.starmap(
metrop_kernel,
[(posterior[draw], tempered_logp[draw], *parameters) for draw in range(draws)],
)
else:
results = [
metrop_kernel(posterior[draw], tempered_logp[draw], *parameters)
for draw in tqdm(range(draws), disable=not progressbar)
]
posterior, acc_list = zip(*results)
posterior = np.array(posterior)
acc_rate = sum(acc_list) / proposed
stage += 1
if parallel and cores > 1:
pool.close()
pool.join()
trace = _posterior_to_trace(posterior, variables, model, var_info)
return trace
|
def sample_smc(
draws=1000,
kernel="metropolis",
n_steps=25,
parallel=False,
start=None,
cores=None,
tune_scaling=True,
tune_steps=True,
scaling=1.0,
p_acc_rate=0.99,
threshold=0.5,
epsilon=1.0,
dist_func="absolute_error",
sum_stat=False,
progressbar=False,
model=None,
random_seed=-1,
):
"""
Sequential Monte Carlo based sampling
Parameters
----------
draws : int
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 1000.
kernel : str
Kernel method for the SMC sampler. Available options are ``metropolis`` (default) and `ABC`.
Use `ABC` for likelihood-free inference together with a ``pm.Simulator``.
n_steps : int
The number of steps of each Markov Chain. If ``tune_steps == True`` ``n_steps`` will be used
for the first stage and for the others it will be determined automatically based on the
acceptance rate and `p_acc_rate`, the max number of steps is ``n_steps``.
parallel : bool
Distribute computations across cores if the number of cores is larger than 1.
Defaults to False.
start : dict, or array of dict
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
cores : int
The number of chains to run in parallel. If ``None`` (default), it will be automatically
set to the number of CPUs in the system.
tune_scaling : bool
Whether to compute the scaling factor automatically or not. Defaults to True
tune_steps : bool
Whether to compute the number of steps automatically or not. Defaults to True
scaling : float
Scaling factor applied to the proposal distribution i.e. the step size of the Markov Chain.
If ``tune_scaling == True`` (default) it will be determined automatically at each stage.
p_acc_rate : float
Used to compute ``n_steps`` when ``tune_steps == True``. The higher the value of
``p_acc_rate`` the higher the number of steps computed automatically. Defaults to 0.99.
It should be between 0 and 1.
threshold : float
        Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
epsilon : float
Standard deviation of the gaussian pseudo likelihood. Only works with `kernel = ABC`
dist_func : str
Distance function. Available options are ``absolute_error`` (default) and
``sum_of_squared_distance``. Only works with ``kernel = ABC``
sum_stat : bool
Whether to use or not a summary statistics. Defaults to False. Only works with
``kernel = ABC``
progressbar : bool
Flag for displaying a progress bar. Defaults to False.
    model : Model (optional if in ``with`` context).
random_seed : int
random seed
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
    we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution.
So in more general terms we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
    2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0` the
tempered posterior is the prior).
    3. Increase :math:`\beta` in order to make the effective sample size equal some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the covariance for the proposal distribution.
7. For stages other than 0 use the acceptance rate from the previous stage to estimate the
scaling of the proposal distribution and `n_steps`.
8. Run N Metropolis chains (each one of length `n_steps`), starting each one from a different
sample in :math:`S_{w}`.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
Geophysical Journal International, 2013, 194(3), pp.1701-1726,
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J. and Chen, Y. (2007).
Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
model = modelcontext(model)
if random_seed != -1:
np.random.seed(random_seed)
if cores is None:
cores = _cpu_count()
beta = 0
stage = 0
accepted = 0
acc_rate = 1.0
max_steps = n_steps
proposed = draws * n_steps
model.marginal_likelihood = 1
variables = inputvars(model.vars)
discrete = np.concatenate([[v.dtype in pm.discrete_types] * (v.dsize or 1) for v in variables])
any_discrete = discrete.any()
all_discrete = discrete.all()
shared = make_shared_replacements(variables, model)
prior_logp = logp_forw([model.varlogpt], variables, shared)
pm._log.info("Sample initial stage: ...")
posterior, var_info = _initial_population(draws, model, variables, start)
if kernel.lower() == "abc":
warnings.warn(EXPERIMENTAL_WARNING)
simulator = model.observed_RVs[0]
likelihood_logp = PseudoLikelihood(
epsilon,
simulator.observations,
simulator.distribution.function,
model,
var_info,
dist_func,
sum_stat,
)
elif kernel.lower() == "metropolis":
likelihood_logp = logp_forw([model.datalogpt], variables, shared)
if parallel and cores > 1:
pool = mp.Pool(processes=cores)
while beta < 1:
if parallel and cores > 1:
results = pool.starmap(likelihood_logp, [(sample,) for sample in posterior])
else:
results = [likelihood_logp(sample) for sample in posterior]
likelihoods = np.array(results).squeeze()
beta, old_beta, weights, sj = calc_beta(beta, likelihoods, threshold)
model.marginal_likelihood *= sj
# resample based on plausibility weights (selection)
resampling_indexes = np.random.choice(np.arange(draws), size=draws, p=weights)
posterior = posterior[resampling_indexes]
likelihoods = likelihoods[resampling_indexes]
# compute proposal distribution based on weights
covariance = _calc_covariance(posterior, weights)
proposal = MultivariateNormalProposal(covariance)
# compute scaling (optional) and number of Markov chains steps (optional), based on the
# acceptance rate of the previous stage
if (tune_scaling or tune_steps) and stage > 0:
scaling, n_steps = _tune(
acc_rate, proposed, tune_scaling, tune_steps, scaling, max_steps, p_acc_rate
)
pm._log.info("Stage: {:3d} Beta: {:.3f} Steps: {:3d}".format(stage, beta, n_steps))
# Apply Metropolis kernel (mutation)
proposed = draws * n_steps
priors = np.array([prior_logp(sample) for sample in posterior]).squeeze()
tempered_logp = priors + likelihoods * beta
parameters = (
proposal,
scaling,
accepted,
any_discrete,
all_discrete,
discrete,
n_steps,
prior_logp,
likelihood_logp,
beta,
)
if parallel and cores > 1:
results = pool.starmap(
metrop_kernel,
[(posterior[draw], tempered_logp[draw], *parameters) for draw in range(draws)],
)
else:
results = [
metrop_kernel(posterior[draw], tempered_logp[draw], *parameters)
for draw in tqdm(range(draws), disable=not progressbar)
]
posterior, acc_list = zip(*results)
posterior = np.array(posterior)
acc_rate = sum(acc_list) / proposed
stage += 1
if parallel and cores > 1:
pool.close()
pool.join()
trace = _posterior_to_trace(posterior, variables, model, var_info)
return trace
|
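# A minimal, NumPy-only sketch of the reweight-and-resample step described in the
# sample_smc Notes above. The toy log-likelihood and the fixed beta increment are
# illustrative assumptions; in the real code the increment is chosen by calc_beta
# from the effective sample size.
import numpy as np

rng = np.random.default_rng(0)
draws = 1000
posterior = rng.normal(size=draws)          # stage-0 samples, i.e. draws from the prior
log_like = -0.5 * (posterior - 1.0) ** 2    # toy log-likelihood centred on 1.0

old_beta, new_beta = 0.0, 0.3               # one tempering increment
log_w = (new_beta - old_beta) * log_like    # weight = likelihood ** (delta beta)
log_w -= log_w.max()                        # stabilise before exponentiating
weights = np.exp(log_w)
weights /= weights.sum()

resampled = posterior[rng.choice(draws, size=draws, p=weights)]
print(resampled.mean())                     # mass shifts toward the tempered posterior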
40,620 |
def make_pos_and_tag(tag: str, sep: str = " ",
return_mode: Optional[str] = None) -> Tuple[str, Union[str, list, dict, tuple]]:
"""
Args:
tag: the part-of-speech tag
sep: the separator between part-of-speech tag and grammatical features
return_mode: the type of return value, can be None, list, dict or sorted_dict
Returns:
the part-of-speech label and grammatical features in required format
"""
if tag.endswith(" _"):
tag = tag[:-2]
if sep in tag:
pos, tag = tag.split(sep, maxsplit=1)
else:
pos, tag = tag, ("_" if return_mode is None else "")
if return_mode in ["dict", "list", "sorted_dict"]:
tag = tag.split("|") if tag != "" else []
if "dict" in return_mode:
tag = dict(tuple(elem.split("=")) for elem in tag)
if return_mode == "sorted_dict":
tag = tuple(sorted(tag.items()))
return pos, tag
|
def make_pos_and_tag(tag: str, sep: str = ",",
return_mode: Optional[str] = None) -> Tuple[str, Union[str, list, dict, tuple]]:
"""
Args:
tag: the part-of-speech tag
sep: the separator between part-of-speech tag and grammatical features
return_mode: the type of return value, can be None, list, dict or sorted_dict
Returns:
the part-of-speech label and grammatical features in required format
"""
if tag.endswith(" _"):
tag = tag[:-2]
if sep in tag:
pos, tag = tag.split(sep, maxsplit=1)
else:
pos, tag = tag, ("_" if return_mode is None else "")
if return_mode in ["dict", "list", "sorted_dict"]:
tag = tag.split("|") if tag != "" else []
if "dict" in return_mode:
tag = dict(tuple(elem.split("=")) for elem in tag)
if return_mode == "sorted_dict":
tag = tuple(sorted(tag.items()))
return pos, tag
|
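# Hedged usage sketch for make_pos_and_tag above; the tag string is an illustrative
# CoNLL-U style example, not taken from this listing.
pos, feats = make_pos_and_tag("NOUN Case=Nom|Number=Sing", sep=" ", return_mode="dict")
print(pos)    # NOUN
print(feats)  # {'Case': 'Nom', 'Number': 'Sing'}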
39,879 |
def _get_infura_provider(provider_uri):
# https://web3py.readthedocs.io/en/latest/providers.html#infura-mainnet
uri_breakdown = urlparse(provider_uri)
infura_envvar = 'WEB3_INFURA_PROJECT_ID'
os.environ[infura_envvar] = os.environ.get(infura_envvar, uri_breakdown.netloc)
try:
# TODO: Only testnet for now
from web3.auto.infura.goerli import w3
except InfuraKeyNotFound:
raise ProviderError(f'{infura_envvar} must be provided in order to use an Infura Web3 provider.')
# Verify Connection
connected = w3.isConnected()
if not connected:
raise ProviderError('Failed to connect to Infura node.')
return w3.provider
|
def _get_infura_provider(provider_uri):
# https://web3py.readthedocs.io/en/latest/providers.html#infura-mainnet
uri_breakdown = urlparse(provider_uri)
infura_envvar = 'WEB3_INFURA_PROJECT_ID'
os.environ[infura_envvar] = os.environ.get(infura_envvar, uri_breakdown.netloc)
try:
# TODO: Only testnet for now
from web3.auto.infura.goerli import w3
except InfuraKeyNotFound:
raise ProviderError(f'{infura_envvar} must be provided in order to use an Infura Web3 provider {provider_uri}.')
# Verify Connection
connected = w3.isConnected()
if not connected:
raise ProviderError('Failed to connect to Infura node.')
return w3.provider
|
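# Small sketch of the URI handling in _get_infura_provider above: the netloc of the
# provider URI becomes WEB3_INFURA_PROJECT_ID when that variable is unset. The URI
# below is a made-up placeholder, not a real Infura project id.
from urllib.parse import urlparse

print(urlparse("infura://0123456789abcdef").netloc)  # 0123456789abcdef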
22,720 |
def _prepare_environ(workspace):
new_environ = os.environ.copy()
new_environ['TMPDIR'] = workspace
    # So, pytest is nice, and a little too nice for our usage.
# In order to help user to call seamlessly any piece of python code without requiring to
# install it as a full-fledged setuptools distribution for instance, it injects the current
# path into the PYTHONPATH environment variable. This allows the python interpreter to import
# as modules any python file available in current working directory.
# See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description.
# However this behavior is not good in integration tests, in particular the nginx oldest ones.
# Indeed during these kind of tests certbot is installed as a transitive dependency to
# certbot-nginx. Here is the trick: this certbot version is not necessarily the same than
# the certbot codebase lying in current working directory. For instance in oldest tests
# certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0.
# If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the
# modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0).
# This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if
# PYTHONPATH is set, it does not contain the current working directory.
if new_environ.get('PYTHONPATH'):
# certbot_integration_tests.__file__ is:
# '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc'
# ... and we want '/path/to/certbot'
certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__)))
python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root]
new_environ['PYTHONPATH'] = ':'.join(python_paths)
return new_environ
|
def _prepare_environ(workspace):
new_environ = os.environ.copy()
new_environ['TMPDIR'] = workspace
    # So, pytest is nice, and a little too nice for our usage.
# In order to help user to call seamlessly any piece of python code without requiring to
# install it as a full-fledged setuptools distribution for instance, it injects the current
# path into the PYTHONPATH environment variable. This allows the python interpreter to import
# as modules any python file available in current working directory.
# See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description.
# However this behavior is not good in integration tests, in particular the nginx oldest ones.
# Indeed during these kind of tests certbot is installed as a transitive dependency to
# certbot-nginx. Here is the trick: this certbot version is not necessarily the same as
# the certbot codebase lying in current working directory. For instance in oldest tests
# certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0.
# If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the
# modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0).
# This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if
# PYTHONPATH is set, it does not contain the current working directory.
if new_environ.get('PYTHONPATH'):
# certbot_integration_tests.__file__ is:
# '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc'
# ... and we want '/path/to/certbot'
certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__)))
python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root]
new_environ['PYTHONPATH'] = ':'.join(python_paths)
return new_environ
|
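# Minimal sketch of the PYTHONPATH filtering performed in _prepare_environ above,
# with made-up paths: the certbot checkout root is dropped, unrelated entries are kept.
certbot_root = "/path/to/certbot"
pythonpath = "/path/to/certbot:/somewhere/else"
print(":".join(p for p in pythonpath.split(":") if p != certbot_root))  # /somewhere/else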
8,786 |
def botfactory(settings, autoloads=None):
"""Build a test instance of Sopel, with a :class:`MockIRCBackend`.
:param settings: Sopel's configuration for testing purposes
:type settings: :class:`sopel.config.Config`
:param list autoloads: list of plugins to autoload after creating the bot
:return: a test instance of the bot
:rtype: :class:`sopel.bot.Sopel`
This will instanciate a :class:`~sopel.bot.Sopel` object, replace its
backend with a :class:`~MockIRCBackend`, and then autoload plugins. This
    will automatically load the ``coretasks`` plugin, and every other plugin
from ``autoloads``::
bot = botfactory(settings, ['emoticons', 'find', 'remind'])
"""
autoloads = set(autoloads or []) | {'coretasks'}
mockbot = Sopel(settings)
mockbot.backend = MockIRCBackend(mockbot)
usable_plugins = sopel.plugins.get_usable_plugins(settings)
for name in autoloads:
plugin = usable_plugins[name][0]
plugin.load()
plugin.register(mockbot)
return mockbot
|
def botfactory(settings, autoloads=None):
"""Build a test instance of Sopel, with a :class:`MockIRCBackend`.
:param settings: Sopel's configuration for testing purposes
:type settings: :class:`sopel.config.Config`
:param list autoloads: list of plugins to autoload after creating the bot
:return: a test instance of the bot
:rtype: :class:`sopel.bot.Sopel`
This will instantiate a :class:`~sopel.bot.Sopel` object, replace its
backend with a :class:`~MockIRCBackend`, and then autoload plugins. This
    will automatically load the ``coretasks`` plugin, and every other plugin
from ``autoloads``::
bot = botfactory(settings, ['emoticons', 'find', 'remind'])
"""
autoloads = set(autoloads or []) | {'coretasks'}
mockbot = Sopel(settings)
mockbot.backend = MockIRCBackend(mockbot)
usable_plugins = sopel.plugins.get_usable_plugins(settings)
for name in autoloads:
plugin = usable_plugins[name][0]
plugin.load()
plugin.register(mockbot)
return mockbot
|
17,771 |
def merge(datasets, bounds=None, res=None, nodata=None, precision=10, indexes=None,
output_count=None, method='first'):
"""Copy valid pixels from input files to an output file.
All files must have the same number of bands, data type, and
coordinate reference system.
Input files are merged in their listed order using the reverse
painter's algorithm (default) or another method. If the output file exists,
its values will be overwritten by input values.
Geospatial bounds and resolution of a new output file in the
units of the input file coordinate reference system may be provided
and are otherwise taken from the first input file.
Parameters
----------
datasets: list of dataset objects opened in 'r' mode
source datasets to be merged.
bounds: tuple, optional
Bounds of the output image (left, bottom, right, top).
If not set, bounds are determined from bounds of input rasters.
res: tuple, optional
Output resolution in units of coordinate reference system. If not set,
the resolution of the first raster is used. If a single value is passed,
output pixels will be square.
nodata: float, optional
nodata value to use in output file. If not set, uses the nodata value
in the first input raster.
precision: float, optional
Number of decimal points of precision when computing inverse transform.
indexes : list of ints or a single int, optional
bands to read and merge
output_count: int, optional
If using callable it may be useful to have additional bands in the output
in addition to the indexes specified for read
method : str or callable
pre-defined method:
first: reverse painting
last: paint valid new on top of existing
min: pixel-wise min of existing and new
max: pixel-wise max of existing and new
or custom callable with signature:
def function(old_data, new_data, old_nodata, new_nodata, index=None, roff=None, coff=None):
Parameters
----------
old_data : array_like
array to update with new_data
new_data : array_like
data to merge
same shape as old_data
            old_nodata, new_nodata : array_like
boolean masks where old/new data is nodata
same shape as old_data
index: int
index of the current dataset within the merged dataset collection
roff: int
row offset in base array
coff: int
column offset in base array
Returns
-------
tuple
Two elements:
dest: numpy ndarray
Contents of all input rasters in single array
out_transform: affine.Affine()
Information for mapping pixel coordinates in `dest` to another
coordinate system
"""
first = datasets[0]
first_res = first.res
nodataval = first.nodatavals[0]
dtype = first.dtypes[0]
if method not in MERGE_METHODS and not callable(method):
raise ValueError('Unknown method {0}, must be one of {1} or callable'
.format(method, MERGE_METHODS))
# Determine output band count
if indexes is None:
src_count = first.count
elif isinstance(indexes, int):
src_count = indexes
else:
src_count = len(indexes)
if not output_count:
output_count = src_count
# Extent from option or extent of all inputs
if bounds:
dst_w, dst_s, dst_e, dst_n = bounds
else:
# scan input files
xs = []
ys = []
for src in datasets:
left, bottom, right, top = src.bounds
xs.extend([left, right])
ys.extend([bottom, top])
dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)
logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
output_transform = Affine.translation(dst_w, dst_n)
logger.debug("Output transform, before scaling: %r", output_transform)
# Resolution/pixel size
if not res:
res = first_res
elif not np.iterable(res):
res = (res, res)
elif len(res) == 1:
res = (res[0], res[0])
output_transform *= Affine.scale(res[0], -res[1])
logger.debug("Output transform, after scaling: %r", output_transform)
# Compute output array shape. We guarantee it will cover the output
# bounds completely
output_width = int(math.ceil((dst_e - dst_w) / res[0]))
output_height = int(math.ceil((dst_n - dst_s) / res[1]))
# Adjust bounds to fit
dst_e, dst_s = output_transform * (output_width, output_height)
logger.debug("Output width: %d, height: %d", output_width, output_height)
logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))
# create destination array
dest = np.zeros((output_count, output_height, output_width), dtype=dtype)
if nodata is not None:
nodataval = nodata
logger.debug("Set nodataval: %r", nodataval)
if nodataval is not None:
# Only fill if the nodataval is within dtype's range
inrange = False
if np.dtype(dtype).kind in ('i', 'u'):
info = np.iinfo(dtype)
inrange = (info.min <= nodataval <= info.max)
elif np.dtype(dtype).kind == 'f':
info = np.finfo(dtype)
if np.isnan(nodataval):
inrange = True
else:
inrange = (info.min <= nodataval <= info.max)
if inrange:
dest.fill(nodataval)
else:
warnings.warn(
"Input file's nodata value, %s, is beyond the valid "
"range of its data type, %s. Consider overriding it "
"using the --nodata option for better results." % (
nodataval, dtype))
else:
nodataval = 0
if method == 'first':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'last':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = ~new_nodata
old_data[mask] = new_data[mask]
elif method == 'min':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.minimum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'max':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.maximum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif callable(method):
copyto = method
else:
raise ValueError(method)
for idx, src in enumerate(datasets):
# Real World (tm) use of boundless reads.
# This approach uses the maximum amount of memory to solve the
# problem. Making it more efficient is a TODO.
# 1. Compute spatial intersection of destination and source
src_w, src_s, src_e, src_n = src.bounds
int_w = src_w if src_w > dst_w else dst_w
int_s = src_s if src_s > dst_s else dst_s
int_e = src_e if src_e < dst_e else dst_e
int_n = src_n if src_n < dst_n else dst_n
# 2. Compute the source window
src_window = windows.from_bounds(
int_w, int_s, int_e, int_n, src.transform, precision=precision)
logger.debug("Src %s window: %r", src.name, src_window)
src_window = src_window.round_shape()
# 3. Compute the destination window
dst_window = windows.from_bounds(
int_w, int_s, int_e, int_n, output_transform, precision=precision)
# 4. Read data in source window into temp
trows, tcols = (
int(round(dst_window.height)), int(round(dst_window.width)))
temp_shape = (src_count, trows, tcols)
temp = src.read(out_shape=temp_shape, window=src_window,
boundless=False, masked=True, indexes=indexes)
# 5. Copy elements of temp into dest
roff, coff = (
int(round(dst_window.row_off)), int(round(dst_window.col_off)))
region = dest[:, roff:roff + trows, coff:coff + tcols]
if np.isnan(nodataval):
region_nodata = np.isnan(region)
temp_nodata = np.isnan(temp)
else:
region_nodata = region == nodataval
temp_nodata = temp.mask
sig = signature(copyto)
if len(sig.parameters.keys()) == 7:
copyto(region, temp, region_nodata, temp_nodata, idx, roff, coff)
else:
copyto(region, temp, region_nodata, temp_nodata)
return dest, output_transform
|
def merge(datasets, bounds=None, res=None, nodata=None, precision=10, indexes=None,
output_count=None, method='first'):
"""Copy valid pixels from input files to an output file.
All files must have the same number of bands, data type, and
coordinate reference system.
Input files are merged in their listed order using the reverse
painter's algorithm (default) or another method. If the output file exists,
its values will be overwritten by input values.
Geospatial bounds and resolution of a new output file in the
units of the input file coordinate reference system may be provided
and are otherwise taken from the first input file.
Parameters
----------
datasets: list of dataset objects opened in 'r' mode
source datasets to be merged.
bounds: tuple, optional
Bounds of the output image (left, bottom, right, top).
If not set, bounds are determined from bounds of input rasters.
res: tuple, optional
Output resolution in units of coordinate reference system. If not set,
the resolution of the first raster is used. If a single value is passed,
output pixels will be square.
nodata: float, optional
nodata value to use in output file. If not set, uses the nodata value
in the first input raster.
precision: float, optional
Number of decimal points of precision when computing inverse transform.
indexes : list of ints or a single int, optional
bands to read and merge
output_count: int, optional
If using callable it may be useful to have additional bands in the output
in addition to the indexes specified for read
method : str or callable
pre-defined method:
first: reverse painting
last: paint valid new on top of existing
min: pixel-wise min of existing and new
max: pixel-wise max of existing and new
or custom callable with signature:
def function(old_data, new_data, old_nodata, new_nodata, index=None, roff=None, coff=None):
Parameters
----------
old_data : array_like
array to update with new_data
new_data : array_like
data to merge
same shape as old_data
            old_nodata, new_nodata : array_like
boolean masks where old/new data is nodata
same shape as old_data
index: int
index of the current dataset within the merged dataset collection
roff: int
row offset in base array
coff: int
column offset in base array
Returns
-------
tuple
Two elements:
dest: numpy ndarray
Contents of all input rasters in single array
out_transform: affine.Affine()
Information for mapping pixel coordinates in `dest` to another
coordinate system
"""
first = datasets[0]
first_res = first.res
nodataval = first.nodatavals[0]
dtype = first.dtypes[0]
if method not in MERGE_METHODS and not callable(method):
raise ValueError('Unknown method {0}, must be one of {1} or callable'
.format(method, MERGE_METHODS))
# Determine output band count
if indexes is None:
src_count = first.count
elif isinstance(indexes, int):
src_count = indexes
else:
src_count = len(indexes)
if not output_count:
output_count = src_count
# Extent from option or extent of all inputs
if bounds:
dst_w, dst_s, dst_e, dst_n = bounds
else:
# scan input files
xs = []
ys = []
for src in datasets:
left, bottom, right, top = src.bounds
xs.extend([left, right])
ys.extend([bottom, top])
dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)
logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
output_transform = Affine.translation(dst_w, dst_n)
logger.debug("Output transform, before scaling: %r", output_transform)
# Resolution/pixel size
if not res:
res = first_res
elif not np.iterable(res):
res = (res, res)
elif len(res) == 1:
res = (res[0], res[0])
output_transform *= Affine.scale(res[0], -res[1])
logger.debug("Output transform, after scaling: %r", output_transform)
# Compute output array shape. We guarantee it will cover the output
# bounds completely
output_width = int(math.ceil((dst_e - dst_w) / res[0]))
output_height = int(math.ceil((dst_n - dst_s) / res[1]))
# Adjust bounds to fit
dst_e, dst_s = output_transform * (output_width, output_height)
logger.debug("Output width: %d, height: %d", output_width, output_height)
logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))
# create destination array
dest = np.zeros((output_count, output_height, output_width), dtype=dtype)
if nodata is not None:
nodataval = nodata
logger.debug("Set nodataval: %r", nodataval)
if nodataval is not None:
# Only fill if the nodataval is within dtype's range
inrange = False
if np.dtype(dtype).kind in ('i', 'u'):
info = np.iinfo(dtype)
inrange = (info.min <= nodataval <= info.max)
elif np.dtype(dtype).kind == 'f':
info = np.finfo(dtype)
if np.isnan(nodataval):
inrange = True
else:
inrange = (info.min <= nodataval <= info.max)
if inrange:
dest.fill(nodataval)
else:
warnings.warn(
"Input file's nodata value, %s, is beyond the valid "
"range of its data type, %s. Consider overriding it "
"using the --nodata option for better results." % (
nodataval, dtype))
else:
nodataval = 0
if method == 'first':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'last':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = ~new_nodata
old_data[mask] = new_data[mask]
elif method == 'min':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.minimum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'max':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.maximum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif callable(method):
copyto = method
else:
raise ValueError(method)
for idx, src in enumerate(datasets):
# Real World (tm) use of boundless reads.
# This approach uses the maximum amount of memory to solve the
# problem. Making it more efficient is a TODO.
# 1. Compute spatial intersection of destination and source
src_w, src_s, src_e, src_n = src.bounds
int_w = src_w if src_w > dst_w else dst_w
int_s = src_s if src_s > dst_s else dst_s
int_e = src_e if src_e < dst_e else dst_e
int_n = src_n if src_n < dst_n else dst_n
# 2. Compute the source window
src_window = windows.from_bounds(
int_w, int_s, int_e, int_n, src.transform, precision=precision)
logger.debug("Src %s window: %r", src.name, src_window)
src_window = src_window.round_shape()
# 3. Compute the destination window
dst_window = windows.from_bounds(
int_w, int_s, int_e, int_n, output_transform, precision=precision)
# 4. Read data in source window into temp
trows, tcols = (
int(round(dst_window.height)), int(round(dst_window.width)))
temp_shape = (src_count, trows, tcols)
temp = src.read(out_shape=temp_shape, window=src_window,
boundless=False, masked=True, indexes=indexes)
# 5. Copy elements of temp into dest
roff, coff = (
int(round(dst_window.row_off)), int(round(dst_window.col_off)))
region = dest[:, roff:roff + trows, coff:coff + tcols]
if np.isnan(nodataval):
region_nodata = np.isnan(region)
temp_nodata = np.isnan(temp)
else:
region_nodata = region == nodataval
temp_nodata = temp.mask
        copyto(region, temp, region_nodata, temp_nodata, index=idx, roff=roff, coff=coff)
return dest, output_transform
|
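# Hedged sketch of a custom `method` callable matching the signature documented in the
# merge() docstring above: it averages overlapping valid pixels instead of keeping the
# first or last dataset. The commented call at the end assumes `datasets` is a list of
# rasterio readers opened by the caller.
import numpy as np

def mean_merge(old_data, new_data, old_nodata, new_nodata, index=None, roff=None, coff=None):
    both = np.logical_and(~old_nodata, ~new_nodata)
    old_data[both] = (old_data[both] + new_data[both]) / 2  # average where both are valid
    gaps = np.logical_and(old_nodata, ~new_nodata)
    old_data[gaps] = new_data[gaps]                         # fill remaining gaps with new data

# dest, out_transform = merge(datasets, method=mean_merge)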
5,666 |
def read(filename, mmap=False, int_form='lj'):
"""
Open a WAV file
Return the sample rate (in samples/sec) and data from an LPCM WAV file.
Parameters
----------
filename : string or open file handle
Input WAV file.
mmap : bool, optional
Whether to read data as memory-mapped (default: False). Not compatible
with some bit depths; see Notes. Only to be used on real files.
.. versionadded:: 0.12.0
int_form : {'lj', 'rj', 'fp', 'fs'}
The format ['int_format', 'type', 'dtype', 'format_'?] of the returned
data for integer PCM files. (Float files are always returned in their
native type.) See Notes for details on each format.
- 'lj' [or 'native'?] is the native left-justified integer format of
the WAV itself. This is the only form compatible with mmap.
- 'rj' is right-justified signed integer format.
- 'fp' is float format, normalized with fixed-point convention
to [-1.0, +1.0).
- 'fs' is float format, normalized with full-scale convention
from +1.0 to slightly less than -1.0.
Returns
-------
rate : int
Sample rate of WAV file.
data : numpy array
Data read from WAV file. Data-type is determined from the file;
see Notes. Data is 1-D for 1-channel WAV, or 2-D of shape
(Nsamples, Nchannels) otherwise.
Notes
-----
WAV files can specify arbitrary bit depth, and this function supports
reading any integer PCM depth from 1 to 64 bits.
Non-linear PCM (mu-law, A-law) is not supported.
**Left-justified format**
Data is returned in the smallest compatible numpy int type. 8-bit and
lower is unsigned, while 9-bit and higher is signed.
For example, 24-bit data will be stored as int32, with the MSB of the
24-bit data stored at the MSB of the int32, and typically the least
significant byte is 0x00. (However, if a file actually contains data past
its specified bit depth, those bits will be read and output, too. [2]_)
This bit justification and sign matches WAV's native internal format, which
allows memory mapping of WAV files that use 1, 2, 4, or 8 bytes per sample
(so 24-bit files cannot be memory-mapped, but 32-bit can).
IEEE float PCM in 32- or 64-bit format is supported, with or without mmap.
Values exceeding [-1, +1] are not clipped.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit integer PCM -2147483648 +2147483647 int32
24-bit integer PCM -2147483648 +2147483392 int32
16-bit integer PCM -32768 +32767 int16
8-bit integer PCM 0 255 uint8
===================== =========== =========== =============
**Right-justified format**
Data is returned in the smallest compatible numpy int type, and is always
signed.
For example, 24-bit data will be stored as int32, with the LSB of the
24-bit data stored at the LSB of the int32, and the MSB as 0x00 or 0xff,
depending on sign. (If a file actually contains data past its specified
bit depth, those bits will be discarded during the shift. [2]_)
Examples:
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit integer PCM -2147483648 +2147483647 int32
24-bit integer PCM -8388608 +8388607 int32
16-bit integer PCM -32768 +32767 int16
12-bit integer PCM -2048 +2047 int16
8-bit integer PCM -128 +127 int8
===================== =========== =========== =============
**Fixed-point format**
This converts the integer values to float, dividing by 2**(N-1), which
interprets them as fixed-point numbers with the binary point
to the right of the MSbit, so it can reach -1.0 but cannot reach +1.0.
[3]_ Android [4]_ USB?
An 8-bit file would reach from -1.0 to 0.992 (127/128), for example.
**Full-scale format**
This converts the integer values to float, dividing by 2**(N-1) - 1, using
the convention that full-scale is defined by the highest positive
code. [5]_ AES [6]_ IEC?
An 8-bit file would reach from -1.008 (-128/127) to +1.0, for example.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
.. [2] Adobe Systems Incorporated, "Adobe Audition 3 User Guide", section
"Audio file formats: 24-bit Packed Int (type 1, 20-bit)", 2007
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> from scipy.io import wavfile
>>> import scipy.io
Get the filename for an example .wav file from the tests/data directory.
>>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
>>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
Load the .wav file contents.
>>> samplerate, data = wavfile.read(wav_fname)
>>> print(f"number of channels = {data.shape[1]}")
number of channels = 2
>>> length = data.shape[0] / samplerate
>>> print(f"length = {length}s")
length = 0.01s
Plot the waveform.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> time = np.linspace(0., length, data.shape[0])
>>> plt.plot(time, data[:, 0], label="Left channel")
>>> plt.plot(time, data[:, 1], label="Right channel")
>>> plt.legend()
>>> plt.xlabel("Time [s]")
>>> plt.ylabel("Amplitude")
>>> plt.show()
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
if mmap and int_form != 'lj':
        raise ValueError("mmap is only compatible with int_form='lj'")
try:
file_size, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
while fid.tell() < file_size:
# read the next chunk
chunk_id = fid.read(4)
if not chunk_id:
if data_chunk_received:
# End of file but data successfully read
warnings.warn(
"Reached EOF prematurely; finished at {:d} bytes, "
"expected {:d} bytes from header."
.format(fid.tell(), file_size),
WavFileWarning, stacklevel=2)
break
else:
raise ValueError("Unexpected end of file.")
elif len(chunk_id) < 4:
msg = f"Incomplete chunk ID: {repr(chunk_id)}"
# If we have the data, ignore the broken chunk
if fmt_chunk_received and data_chunk_received:
warnings.warn(msg + ", ignoring it.", WavFileWarning,
stacklevel=2)
else:
raise ValueError(msg)
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
block_align = fmt_chunk[5]
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
data_chunk_received = True
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, block_align, mmap)
if format_tag == WAVE_FORMAT.IEEE_FLOAT:
continue
if int_form == 'lj':
pass
elif int_form == 'rj':
if bit_depth <= 8:
data = (data - 128).view(numpy.int8)
data = data >> (data.itemsize*8 - bit_depth)
elif int_form == 'fp':
if bit_depth <= 8:
data = (data - 128).view(numpy.int8)
data = data >> (data.itemsize*8 - bit_depth)
data = data / (2**(bit_depth - 1))
elif int_form == 'fs':
if bit_depth <= 8:
data = (data - 128).view(numpy.int8)
data = data >> (data.itemsize*8 - bit_depth)
data = data / (2**(bit_depth - 1) - 1)
else:
raise ValueError('int_form not understood')
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in {b'JUNK', b'Fake'}:
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
|
def read(filename, mmap=False, int_form='lj'):
"""
Open a WAV file
Return the sample rate (in samples/sec) and data from an LPCM WAV file.
Parameters
----------
filename : string or open file handle
Input WAV file.
mmap : bool, optional
Whether to read data as memory-mapped (default: False). Not compatible
with some bit depths; see Notes. Only to be used on real files.
.. versionadded:: 0.12.0
int_form : {'lj', 'rj', 'fp', 'fs'}
The format ['int_format', 'type', 'dtype', 'format_'?] of the returned
data for integer PCM files. (Float files are always returned in their
native type.) See Notes for details on each format.
- 'lj' [or 'native'?] is the native left-justified integer format of
the WAV itself. This is the only form compatible with mmap.
- 'rj' is right-justified signed integer format.
- 'fp' is float format, normalized with fixed-point convention
          to [-1.0, +1.0).
- 'fs' is float format, normalized with full-scale convention
from +1.0 to slightly less than -1.0.
Returns
-------
rate : int
Sample rate of WAV file.
data : numpy array
Data read from WAV file. Data-type is determined from the file;
see Notes. Data is 1-D for 1-channel WAV, or 2-D of shape
(Nsamples, Nchannels) otherwise.
Notes
-----
WAV files can specify arbitrary bit depth, and this function supports
reading any integer PCM depth from 1 to 64 bits.
Non-linear PCM (mu-law, A-law) is not supported.
**Left-justified format**
Data is returned in the smallest compatible numpy int type. 8-bit and
lower is unsigned, while 9-bit and higher is signed.
For example, 24-bit data will be stored as int32, with the MSB of the
24-bit data stored at the MSB of the int32, and typically the least
significant byte is 0x00. (However, if a file actually contains data past
its specified bit depth, those bits will be read and output, too. [2]_)
This bit justification and sign matches WAV's native internal format, which
allows memory mapping of WAV files that use 1, 2, 4, or 8 bytes per sample
(so 24-bit files cannot be memory-mapped, but 32-bit can).
IEEE float PCM in 32- or 64-bit format is supported, with or without mmap.
Values exceeding [-1, +1] are not clipped.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit integer PCM -2147483648 +2147483647 int32
24-bit integer PCM -2147483648 +2147483392 int32
16-bit integer PCM -32768 +32767 int16
8-bit integer PCM 0 255 uint8
===================== =========== =========== =============
**Right-justified format**
Data is returned in the smallest compatible numpy int type, and is always
signed.
For example, 24-bit data will be stored as int32, with the LSB of the
24-bit data stored at the LSB of the int32, and the MSB as 0x00 or 0xff,
depending on sign. (If a file actually contains data past its specified
bit depth, those bits will be discarded during the shift. [2]_)
Examples:
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit integer PCM -2147483648 +2147483647 int32
24-bit integer PCM -8388608 +8388607 int32
16-bit integer PCM -32768 +32767 int16
12-bit integer PCM -2048 +2047 int16
8-bit integer PCM -128 +127 int8
===================== =========== =========== =============
**Fixed-point format**
This converts the integer values to float, dividing by 2**(N-1), which
interprets them as fixed-point numbers with the binary point
to the right of the MSbit, so it can reach -1.0 but cannot reach +1.0.
[3]_ Android [4]_ USB?
An 8-bit file would reach from -1.0 to 0.992 (127/128), for example.
**Full-scale format**
This converts the integer values to float, dividing by 2**(N-1) - 1, using
the convention that full-scale is defined by the highest positive
code. [5]_ AES [6]_ IEC?
An 8-bit file would reach from -1.008 (-128/127) to +1.0, for example.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
.. [2] Adobe Systems Incorporated, "Adobe Audition 3 User Guide", section
"Audio file formats: 24-bit Packed Int (type 1, 20-bit)", 2007
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> from scipy.io import wavfile
>>> import scipy.io
Get the filename for an example .wav file from the tests/data directory.
>>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
>>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
Load the .wav file contents.
>>> samplerate, data = wavfile.read(wav_fname)
>>> print(f"number of channels = {data.shape[1]}")
number of channels = 2
>>> length = data.shape[0] / samplerate
>>> print(f"length = {length}s")
length = 0.01s
Plot the waveform.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> time = np.linspace(0., length, data.shape[0])
>>> plt.plot(time, data[:, 0], label="Left channel")
>>> plt.plot(time, data[:, 1], label="Right channel")
>>> plt.legend()
>>> plt.xlabel("Time [s]")
>>> plt.ylabel("Amplitude")
>>> plt.show()
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
if mmap and int_form != 'lj':
        raise ValueError("mmap is only compatible with int_form='lj'")
try:
file_size, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
while fid.tell() < file_size:
# read the next chunk
chunk_id = fid.read(4)
if not chunk_id:
if data_chunk_received:
# End of file but data successfully read
warnings.warn(
"Reached EOF prematurely; finished at {:d} bytes, "
"expected {:d} bytes from header."
.format(fid.tell(), file_size),
WavFileWarning, stacklevel=2)
break
else:
raise ValueError("Unexpected end of file.")
elif len(chunk_id) < 4:
msg = f"Incomplete chunk ID: {repr(chunk_id)}"
# If we have the data, ignore the broken chunk
if fmt_chunk_received and data_chunk_received:
warnings.warn(msg + ", ignoring it.", WavFileWarning,
stacklevel=2)
else:
raise ValueError(msg)
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
block_align = fmt_chunk[5]
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
data_chunk_received = True
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, block_align, mmap)
if format_tag == WAVE_FORMAT.IEEE_FLOAT:
continue
if int_form == 'lj':
pass
elif int_form == 'rj':
if bit_depth <= 8:
data = (data - 128).view(numpy.int8)
data = data >> (data.itemsize*8 - bit_depth)
elif int_form == 'fp':
if bit_depth <= 8:
data = (data - 128).view(numpy.int8)
data = data >> (data.itemsize*8 - bit_depth)
data = data / (2**(bit_depth - 1))
elif int_form == 'fs':
if bit_depth <= 8:
data = (data - 128).view(numpy.int8)
data = data >> (data.itemsize*8 - bit_depth)
data = data / (2**(bit_depth - 1) - 1)
else:
raise ValueError('int_form not understood')
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in {b'JUNK', b'Fake'}:
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
|
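# Pure-NumPy illustration of the 'fp' fixed-point normalisation described in the read()
# Notes above: dividing int16 samples by 2**15 maps full scale onto [-1.0, +1.0).
import numpy as np

samples = np.array([-32768, 0, 32767], dtype=np.int16)
print(samples / 2 ** 15)  # [-1.          0.          0.99996948]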
47,971 |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--model_dir', type=Path, metavar='DIR',
default=Path.cwd(), help='root of the directory tree with IR model files')
parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
help='root of the directory tree to place compiled models files into')
parser.add_argument('--name', metavar='PAT[,PAT...]',
help='compile only models whose names match at least one of the specified patterns')
parser.add_argument('--list', type=Path, metavar='FILE.LST',
help='compile only models whose names match at least one of the patterns in the specified file')
parser.add_argument('-ip', '--input_precision', dest='input_precision',
help='Input precision of compiled network')
parser.add_argument('-op', '--output_precision', dest='output_precision',
help='output_precision of compiled network')
parser.add_argument('--precisions', metavar='PREC[,PREC...]',
help='compile only specified precisions')
parser.add_argument('--target_device', help='target device for the compiled model', default='MYRIAD')
parser.add_argument('--all', action='store_true', help='compile all available models')
parser.add_argument('--print_all', action='store_true', help='print all available models')
parser.add_argument('--compiler', type=Path, help='Compile Tool executable entry point')
parser.add_argument('--dry_run', action='store_true',
help='print the compilation commands without running them')
args = parser.parse_args()
compiler_path = args.compiler
if compiler_path is None:
try:
compiler_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/tools/compile_tool/compile_tool'
except KeyError:
sys.exit('Unable to locate Compile Tool. '
+ 'Use --compiler or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')
models = common.load_models_from_args(parser, args)
if args.precisions is None:
requested_precisions = common.KNOWN_COMPILABLE_PRECISIONS
else:
requested_precisions = set(args.precisions.split(','))
unknown_precisions = requested_precisions - common.KNOWN_COMPILABLE_PRECISIONS
if unknown_precisions:
sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))
reporter = common.Reporter(common.DirectOutputContext())
output_dir = args.model_dir if args.output_dir is None else args.output_dir
failed_models = []
for model in models:
if not model.compilable:
reporter.print_section_heading('Skipping {} (compilation not supported)', model.name)
reporter.print()
continue
for precision in sorted(requested_precisions):
if not compile(reporter, compiler_path, model, precision, args, output_dir):
failed_models.append(model.name + ' (' + precision + ')')
continue
if failed_models:
reporter.print('FAILED:')
for failed_model_name in failed_models:
reporter.print(failed_model_name)
sys.exit(1)
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--model_dir', type=Path, metavar='DIR',
default=Path.cwd(), help='root of the directory tree with IR model files')
parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
help='root of the directory tree to place compiled models files into')
parser.add_argument('--name', metavar='PAT[,PAT...]',
help='compile only models whose names match at least one of the specified patterns')
parser.add_argument('--list', type=Path, metavar='FILE.LST',
help='compile only models whose names match at least one of the patterns in the specified file')
parser.add_argument('-ip', '--input_precision', dest='input_precision',
help='Input precision of compiled network')
parser.add_argument('-op', '--output_precision', dest='output_precision',
help='output_precision of compiled network')
parser.add_argument('--precisions', metavar='PREC[,PREC...]',
help='compile only specified precisions')
parser.add_argument('--target_device', help='target device for the compiled model', default='MYRIAD')
parser.add_argument('--all', action='store_true', help='compile all available models')
parser.add_argument('--print_all', action='store_true', help='print all available models')
parser.add_argument('--compile_tool', type=Path, help='Compile Tool executable entry point')
parser.add_argument('--dry_run', action='store_true',
help='print the compilation commands without running them')
args = parser.parse_args()
    compiler_path = args.compile_tool
if compiler_path is None:
try:
compiler_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/tools/compile_tool/compile_tool'
except KeyError:
sys.exit('Unable to locate Compile Tool. '
                + 'Use --compile_tool or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')
models = common.load_models_from_args(parser, args)
if args.precisions is None:
requested_precisions = common.KNOWN_COMPILABLE_PRECISIONS
else:
requested_precisions = set(args.precisions.split(','))
unknown_precisions = requested_precisions - common.KNOWN_COMPILABLE_PRECISIONS
if unknown_precisions:
sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))
reporter = common.Reporter(common.DirectOutputContext())
output_dir = args.model_dir if args.output_dir is None else args.output_dir
failed_models = []
for model in models:
if not model.compilable:
reporter.print_section_heading('Skipping {} (compilation not supported)', model.name)
reporter.print()
continue
for precision in sorted(requested_precisions):
if not compile(reporter, compiler_path, model, precision, args, output_dir):
failed_models.append(model.name + ' (' + precision + ')')
continue
if failed_models:
reporter.print('FAILED:')
for failed_model_name in failed_models:
reporter.print(failed_model_name)
sys.exit(1)
|
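# Small illustration of the precision validation in main() above, using a hypothetical
# KNOWN_COMPILABLE_PRECISIONS set (placeholder values, not taken from the real tool):
# unknown entries appear as a set difference and would trigger sys.exit().
KNOWN_COMPILABLE_PRECISIONS = {"FP16", "FP16-INT8"}
requested = set("FP16,FP32".split(","))
print(sorted(requested - KNOWN_COMPILABLE_PRECISIONS))  # ['FP32']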
8,284 |
def _read_pts(filename, scale=None, rebin_energy=1, sum_frames=True,
SI_dtype=np.uint8, cutoff_at_kV=None, downsample=1,
only_valid_data=True, read_em_image=False,
frame_list=None, frame_start_index=None, frame_shifts=None,
lazy=False,
**kwargs):
"""
Parameters
----------
    filename : str
        path of the .pts file to be read
scale : list of float
-scale[2], scale[3] are the positional scale from asw data,
default is None, calc from pts internal data
rebin_energy : int
Binning parameter along energy axis. Must be 2^n.
sum_frames : bool
If False, returns each frame.
SI_dtype : dtype
data type for spectrum image. default is uint8
cutoff_at_kV : float
The maximum energy. Useful to reduce memory size of spectrum image.
Default is None (no cutoff)
downsample : int or (int, int)
        Downsample along spatial axes to reduce memory size of spectrum image.
        Value must be 2^n. Default is 1 (no downsampling).
only_valid_data : bool, default True
Read incomplete frame if only_valid_data == False
        (usually an interrupted measurement makes an incomplete frame)
read_em_image : bool, default False
Read SEM/STEM image from pts file if read_em_image == True
frame_list : list of int, default None
List of frame numbers to be read (None for all data)
frame_shifts : list of [int, int] or list of [int, int, int], default None
        Each frame will be loaded with an offset of dy, dx (and optionally the energy
axis). Units are pixels/channels.
This is useful for express drift correction. Not suitable for accurate
analysis.
Like the result of estimate_shift2D(), the first parameter is for y-axis
frame_start_index: list
The list of offset pointer of each frame in the raw data.
The pointer for frame0 is 0.
lazy : bool, default False
Read spectrum image into sparse array if lazy == True
SEM/STEM image is always read into dense array (numpy.ndarray)
Returns
-------
dictionary : dict or list of dict
The dictionary used to create the signals, list of dictionaries of
spectrum image and SEM/STEM image if read_em_image == True
"""
fd = open(filename, "br")
file_magic = np.fromfile(fd, "<I", 1)[0]
def check_multiple(factor, number, string):
if factor > 1 and number % factor != 0:
fd.close()
raise ValueError(f'`{string}` must be a multiple of {number}.')
check_multiple(rebin_energy, 4096, 'rebin_energy')
rebin_energy = int(rebin_energy)
if file_magic == 304:
# fileformat
_ = _decode(fd.read(8).rstrip(b"\x00"))
a, b, head_pos, head_len, data_pos, data_len = np.fromfile(fd, "<I", 6)
# groupname
_ = _decode(fd.read(128).rstrip(b"\x00"))
# memo
_ = _decode(fd.read(132).rstrip(b"\x00"))
datefile = datetime(1899, 12, 30) + timedelta(days=np.fromfile(fd, "d", 1)[0])
fd.seek(head_pos + 12)
header = _parsejeol(fd)
meas_data_header = header["PTTD Data"]["AnalyzableMap MeasData"]
width, height = meas_data_header["Meas Cond"]["Pixels"].split("x")
width = int(width)
height = int(height)
if isinstance(downsample, Iterable):
if len(downsample) > 2:
raise ValueError("`downsample` can't be an iterable of length "
"different from 2.")
downsample_width = downsample[0]
downsample_height = downsample[1]
check_multiple(downsample_width, width, 'downsample[0]')
check_multiple(downsample_height, height, 'downsample[1]')
else:
downsample_width = downsample_height = int(downsample)
check_multiple(downsample_width, width, 'downsample')
check_multiple(downsample_height, height, 'downsample')
check_multiple(downsample_width, width, 'downsample[0]')
check_multiple(downsample_height, height, 'downsample[1]')
# Normalisation factor for the x and y position in the stream; depends
# on the downsampling and the size of the navigation space
width_norm = int(4096 / width * downsample_width)
height_norm = int(4096 / height * downsample_height)
width = int(width / downsample_width)
height = int(height / downsample_height)
channel_number = int(4096 / rebin_energy)
fd.seek(data_pos)
# read spectrum image
rawdata = np.fromfile(fd, dtype="u2")
fd.close()
if scale is not None:
xscale = -scale[2] / width
yscale = scale[3] / height
units = "µm"
else:
scale = header["PTTD Param"]["Params"]["PARAMPAGE0_SEM"]["ScanSize"] / meas_data_header["MeasCond"]["Mag"] * 1.0E3
xscale = scale / width
yscale = scale / height
units = "µm"
ch_mod = meas_data_header["Meas Cond"]["Tpl"]
ch_res = meas_data_header["Doc"]["CoefA"]
ch_ini = meas_data_header["Doc"]["CoefB"]
ch_pos = header["PTTD Param"]["Params"]["PARAMPAGE1_EDXRF"]["Tpl"][ch_mod][
"DigZ"
]
energy_offset = ch_ini - ch_res * ch_pos
energy_scale = ch_res * rebin_energy
if cutoff_at_kV is not None:
channel_number = int(
np.round((cutoff_at_kV - energy_offset) / energy_scale)
)
# pixel time in milliseconds
pixel_time = meas_data_header["Doc"]["DwellTime(msec)"]
# Sweep value is not reliable, so +1 frame is needed if sum_frames = False
# priority of the length of frame_start_index is higher than "sweep" in header
sweep = meas_data_header["Doc"]["Sweep"]
if frame_start_index:
sweep = len(frame_start_index)
auto_frame_list = False
if frame_list:
frame_list = np.asarray(frame_list)
else:
auto_frame_list = True
frame_list = np.arange(sweep + 1)
# Remove frame numbers outside the data range.
# The frame with number == sweep is accepted in this stage
# for incomplete frame
# If "frame_shifts" option is used, frame number must be in range of frame_shifts
if frame_shifts is not None:
nsf = len(frame_shifts)
wrong_frames_list = frame_list[
np.where((frame_list<0) | (frame_list > sweep)
| (frame_list > nsf)
| ((frame_list == nsf) & (not auto_frame_list)))]
frame_list = frame_list[
np.where((0 <= frame_list) & (frame_list <= sweep)
& (frame_list < nsf))]
else:
wrong_frames_list = frame_list[
np.where((frame_list<0) | (frame_list > sweep))]
frame_list = frame_list[
np.where((0 <= frame_list) & (frame_list <= sweep))]
if len(wrong_frames_list) > 0:
wrong_frames = wrong_frames_list.flatten().tolist()
_logger.info(f"Invalid frame number is specified. The frame {wrong_frames} is not found in pts data.")
# + 1 for incomplete frame
max_frame = frame_list.max() + 1
if frame_start_index is None:
frame_start_index = np.full(max_frame, -1, dtype = np.int32)
else:
frame_start_index = np.asarray(frame_start_index)
        # fill with -1 as invalid index (not loaded)
if (frame_start_index.size < max_frame):
fi = np.full(max_frame, -1, dtype = np.int32)
fi[0:frame_start_index.size] = frame_start_index
frame_start_index = fi
if frame_shifts is None:
frame_shifts = np.zeros((max_frame,3), dtype = np.int16)
if (len(frame_shifts) < max_frame):
fs =np.zeros((max_frame,3), dtype = np.int16)
if len(frame_shifts) > 0:
fs[0:len(frame_shifts),0:len(frame_shifts[0])] = frame_shifts
frame_shifts = fs
if len(frame_shifts[0])==2: # fill z with 0
fr = np.zeros((max_frame,3), dtype = np.int16)
fr[:len(frame_shifts), 0:2] = np.asarray(frame_shifts)
frame_shifts = fr
data, em_data, has_em_image, sweep, frame_start_index, last_valid, origin, frame_shifts_1 = _readcube(
rawdata, frame_start_index, frame_list,
width, height, channel_number,
width_norm, height_norm, rebin_energy,
SI_dtype, sweep, frame_shifts,
sum_frames, read_em_image, only_valid_data, lazy)
header["jeol_pts_frame_origin"] = origin
header["jeol_pts_frame_shifts"] = frame_shifts_1
header["jeol_pts_frame_start_index"] = frame_start_index
# axes_em for SEM/STEM image intensity[(frame,) y, x]
# axes for spectrum image count[(frame,) y, x, energy]
if sum_frames:
axes_em = []
width = data.shape[1]
height = data.shape[0]
else:
axes_em = [{
"index_in_array": 0,
"name": "Frame",
"size": sweep,
"offset": 0,
"scale": pixel_time*height*width/1E3,
"units": 's',
}]
width = data.shape[2]
height = data.shape[1]
axes_em.extend( [
{
"name": "y",
"size": height,
"offset": origin[1],
"scale": yscale,
"units": units,
},
{
"name": "x",
"size": width,
"offset": origin[0],
"scale": xscale,
"units": units,
}
] )
axes = axes_em.copy()
axes.append(
{
"name": "Energy",
"size": channel_number,
"offset": energy_offset,
"scale": energy_scale,
"units": "keV",
},
)
if (not last_valid) and only_valid_data:
_logger.info("The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame.")
hv = meas_data_header["MeasCond"]["AccKV"]
if hv <= 30.0:
mode = "SEM"
else:
mode = "TEM"
detector_hearder = header["PTTD Param"]["Params"]["PARAMPAGE0_SEM"]
metadata = {
"Acquisition_instrument": {
mode: {
"beam_energy": hv,
"magnification": meas_data_header["MeasCond"]["Mag"],
"Detector": {
"EDS": {
"azimuth_angle": detector_hearder["DirAng"],
"detector_type": detector_hearder["DetT"],
"elevation_angle": detector_hearder["ElevAng"],
"energy_resolution_MnKa": detector_hearder["MnKaRES"],
"real_time": meas_data_header["Doc"]["RealTime"],
},
},
},
},
"General": {
"original_filename": os.path.basename(filename),
"date": datefile.date().isoformat(),
"time": datefile.time().isoformat(),
"title": "EDS extracted from " + os.path.basename(filename),
},
"Signal": {
"record_by": "spectrum",
"quantity": "X-rays (Counts)",
"signal_type": "EDS_" + mode,
},
}
metadata_em = {
"Acquisition_instrument": {
mode: {
"beam_energy": hv,
"magnification": meas_data_header["MeasCond"]["Mag"],
},
},
"General": {
"original_filename": os.path.basename(filename),
"date": datefile.date().isoformat(),
"time": datefile.time().isoformat(),
"title": "S(T)EM Image extracted from " + os.path.basename(filename)
},
"Signal": {
"record_by": "image",
},
}
dictionary = {
"data": data,
"axes": axes,
"metadata": metadata,
"original_metadata": header,
}
if read_em_image and has_em_image:
dictionary = [dictionary,
{
"data": em_data,
"axes": axes_em,
"metadata": metadata_em,
"original_metadata": header
}]
else:
_logger.warning("Not a valid JEOL pts format")
fd.close()
return dictionary
|
def _read_pts(filename, scale=None, rebin_energy=1, sum_frames=True,
SI_dtype=np.uint8, cutoff_at_kV=None, downsample=1,
only_valid_data=True, read_em_image=False,
frame_list=None, frame_start_index=None, frame_shifts=None,
lazy=False,
**kwargs):
"""
Parameters
----------
rawdata : numpy.ndarray of uint16
spectrum image part of pts file
scale : list of float
-scale[2], scale[3] are the positional scale from asw data,
default is None, calc from pts internal data
rebin_energy : int
Binning parameter along energy axis. Must be 2^n.
sum_frames : bool
If False, returns each frame.
SI_dtype : dtype
data type for spectrum image. default is uint8
cutoff_at_kV : float
The maximum energy. Useful to reduce memory size of spectrum image.
Default is None (no cutoff)
downsample : int or (int, int)
        Downsample along spatial axes to reduce memory size of spectrum image.
        Value must be 2^n. Default is 1 (no downsampling).
only_valid_data : bool, default True
Read incomplete frame if only_valid_data == False
        (usually an interrupted measurement leaves an incomplete frame)
read_em_image : bool, default False
Read SEM/STEM image from pts file if read_em_image == True
frame_list : list of int, default None
List of frame numbers to be read (None for all data)
frame_shifts : list of [int, int] or list of [int, int, int], default None
        Each frame will be loaded with an offset of dy, dx (and optionally the energy
axis). Units are pixels/channels.
This is useful for express drift correction. Not suitable for accurate
analysis.
Like the result of estimate_shift2D(), the first parameter is for y-axis
frame_start_index: list
The list of offset pointers of each frame in the raw data.
The pointer for frame0 is 0.
lazy : bool, default False
Read spectrum image into sparse array if lazy == True
SEM/STEM image is always read into dense array (numpy.ndarray)
Returns
-------
dictionary : dict or list of dict
The dictionary used to create the signals, list of dictionaries of
spectrum image and SEM/STEM image if read_em_image == True
"""
fd = open(filename, "br")
file_magic = np.fromfile(fd, "<I", 1)[0]
def check_multiple(factor, number, string):
if factor > 1 and number % factor != 0:
fd.close()
raise ValueError(f'`{string}` must be a multiple of {number}.')
check_multiple(rebin_energy, 4096, 'rebin_energy')
rebin_energy = int(rebin_energy)
if file_magic == 304:
# fileformat
_ = _decode(fd.read(8).rstrip(b"\x00"))
a, b, head_pos, head_len, data_pos, data_len = np.fromfile(fd, "<I", 6)
# groupname
_ = _decode(fd.read(128).rstrip(b"\x00"))
# memo
_ = _decode(fd.read(132).rstrip(b"\x00"))
datefile = datetime(1899, 12, 30) + timedelta(days=np.fromfile(fd, "d", 1)[0])
fd.seek(head_pos + 12)
header = _parsejeol(fd)
meas_data_header = header["PTTD Data"]["AnalyzableMap MeasData"]
width, height = meas_data_header["Meas Cond"]["Pixels"].split("x")
width = int(width)
height = int(height)
if isinstance(downsample, Iterable):
if len(downsample) > 2:
raise ValueError("`downsample` can't be an iterable of length "
"different from 2.")
downsample_width = downsample[0]
downsample_height = downsample[1]
check_multiple(downsample_width, width, 'downsample[0]')
check_multiple(downsample_height, height, 'downsample[1]')
else:
downsample_width = downsample_height = int(downsample)
check_multiple(downsample_width, width, 'downsample')
check_multiple(downsample_height, height, 'downsample')
check_multiple(downsample_width, width, 'downsample[0]')
check_multiple(downsample_height, height, 'downsample[1]')
# Normalisation factor for the x and y position in the stream; depends
# on the downsampling and the size of the navigation space
width_norm = int(4096 / width * downsample_width)
height_norm = int(4096 / height * downsample_height)
width = int(width / downsample_width)
height = int(height / downsample_height)
channel_number = int(4096 / rebin_energy)
fd.seek(data_pos)
# read spectrum image
rawdata = np.fromfile(fd, dtype="u2")
fd.close()
if scale is not None:
xscale = -scale[2] / width
yscale = scale[3] / height
units = "µm"
else:
scale = header["PTTD Param"]["Params"]["PARAMPAGE0_SEM"]["ScanSize"] / meas_data_header["MeasCond"]["Mag"] * 1.0E3
xscale = scale / width
yscale = scale / height
units = "µm"
ch_mod = meas_data_header["Meas Cond"]["Tpl"]
ch_res = meas_data_header["Doc"]["CoefA"]
ch_ini = meas_data_header["Doc"]["CoefB"]
ch_pos = header["PTTD Param"]["Params"]["PARAMPAGE1_EDXRF"]["Tpl"][ch_mod][
"DigZ"
]
energy_offset = ch_ini - ch_res * ch_pos
energy_scale = ch_res * rebin_energy
if cutoff_at_kV is not None:
channel_number = int(
np.round((cutoff_at_kV - energy_offset) / energy_scale)
)
# pixel time in milliseconds
pixel_time = meas_data_header["Doc"]["DwellTime(msec)"]
# Sweep value is not reliable, so +1 frame is needed if sum_frames = False
# priority of the length of frame_start_index is higher than "sweep" in header
sweep = meas_data_header["Doc"]["Sweep"]
if frame_start_index:
sweep = len(frame_start_index)
auto_frame_list = False
if frame_list:
frame_list = np.asarray(frame_list)
else:
auto_frame_list = True
frame_list = np.arange(sweep + 1)
# Remove frame numbers outside the data range.
# The frame with number == sweep is accepted in this stage
# for incomplete frame
# If "frame_shifts" option is used, frame number must be in range of frame_shifts
if frame_shifts is not None:
nsf = len(frame_shifts)
wrong_frames_list = frame_list[
np.where((frame_list<0) | (frame_list > sweep)
| (frame_list > nsf)
| ((frame_list == nsf) & (not auto_frame_list)))]
frame_list = frame_list[
np.where((0 <= frame_list) & (frame_list <= sweep)
& (frame_list < nsf))]
else:
wrong_frames_list = frame_list[
np.where((frame_list<0) | (frame_list > sweep))]
frame_list = frame_list[
np.where((0 <= frame_list) & (frame_list <= sweep))]
if len(wrong_frames_list) > 0:
wrong_frames = wrong_frames_list.flatten().tolist()
_logger.info(f"Invalid frame number is specified. The frame {wrong_frames} is not found in pts data.")
# + 1 for incomplete frame
max_frame = frame_list.max() + 1
if frame_start_index is None:
frame_start_index = np.full(max_frame, -1, dtype = np.int32)
else:
frame_start_index = np.asarray(frame_start_index)
            # fill with -1 as invalid index (not loaded)
if (frame_start_index.size < max_frame):
fi = np.full(max_frame, -1, dtype = np.int32)
fi[0:frame_start_index.size] = frame_start_index
frame_start_index = fi
if frame_shifts is None:
frame_shifts = np.zeros((max_frame,3), dtype = np.int16)
if (len(frame_shifts) < max_frame):
fs =np.zeros((max_frame,3), dtype = np.int16)
if len(frame_shifts) > 0:
fs[0:len(frame_shifts),0:len(frame_shifts[0])] = frame_shifts
frame_shifts = fs
if len(frame_shifts[0])==2: # fill z with 0
fr = np.zeros((max_frame,3), dtype = np.int16)
fr[:len(frame_shifts), 0:2] = np.asarray(frame_shifts)
frame_shifts = fr
data, em_data, has_em_image, sweep, frame_start_index, last_valid, origin, frame_shifts_1 = _readcube(
rawdata, frame_start_index, frame_list,
width, height, channel_number,
width_norm, height_norm, rebin_energy,
SI_dtype, sweep, frame_shifts,
sum_frames, read_em_image, only_valid_data, lazy)
header["jeol_pts_frame_origin"] = origin
header["jeol_pts_frame_shifts"] = frame_shifts_1
header["jeol_pts_frame_start_index"] = frame_start_index
# axes_em for SEM/STEM image intensity[(frame,) y, x]
# axes for spectrum image count[(frame,) y, x, energy]
if sum_frames:
axes_em = []
width = data.shape[1]
height = data.shape[0]
else:
axes_em = [{
"index_in_array": 0,
"name": "Frame",
"size": sweep,
"offset": 0,
"scale": pixel_time*height*width/1E3,
"units": 's',
}]
width = data.shape[2]
height = data.shape[1]
axes_em.extend( [
{
"name": "y",
"size": height,
"offset": origin[1],
"scale": yscale,
"units": units,
},
{
"name": "x",
"size": width,
"offset": origin[0],
"scale": xscale,
"units": units,
}
] )
axes = axes_em.copy()
axes.append(
{
"name": "Energy",
"size": channel_number,
"offset": energy_offset,
"scale": energy_scale,
"units": "keV",
},
)
if (not last_valid) and only_valid_data:
_logger.info("The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame.")
hv = meas_data_header["MeasCond"]["AccKV"]
if hv <= 30.0:
mode = "SEM"
else:
mode = "TEM"
detector_hearder = header["PTTD Param"]["Params"]["PARAMPAGE0_SEM"]
metadata = {
"Acquisition_instrument": {
mode: {
"beam_energy": hv,
"magnification": meas_data_header["MeasCond"]["Mag"],
"Detector": {
"EDS": {
"azimuth_angle": detector_hearder["DirAng"],
"detector_type": detector_hearder["DetT"],
"elevation_angle": detector_hearder["ElevAng"],
"energy_resolution_MnKa": detector_hearder["MnKaRES"],
"real_time": meas_data_header["Doc"]["RealTime"],
},
},
},
},
"General": {
"original_filename": os.path.basename(filename),
"date": datefile.date().isoformat(),
"time": datefile.time().isoformat(),
"title": "EDS extracted from " + os.path.basename(filename),
},
"Signal": {
"record_by": "spectrum",
"quantity": "X-rays (Counts)",
"signal_type": "EDS_" + mode,
},
}
metadata_em = {
"Acquisition_instrument": {
mode: {
"beam_energy": hv,
"magnification": meas_data_header["MeasCond"]["Mag"],
},
},
"General": {
"original_filename": os.path.basename(filename),
"date": datefile.date().isoformat(),
"time": datefile.time().isoformat(),
"title": "S(T)EM Image extracted from " + os.path.basename(filename)
},
"Signal": {
"record_by": "image",
},
}
dictionary = {
"data": data,
"axes": axes,
"metadata": metadata,
"original_metadata": header,
}
if read_em_image and has_em_image:
dictionary = [dictionary,
{
"data": em_data,
"axes": axes_em,
"metadata": metadata_em,
"original_metadata": header
}]
else:
_logger.warning("Not a valid JEOL pts format")
fd.close()
return dictionary
|
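The reader above guards `rebin_energy` and `downsample` with a divisibility check before reshaping the 4096-channel stream. Below is a minimal stand-alone sketch of that guard; the helper name `check_divides` and the example values are illustrative, not part of the reader's API.

def check_divides(factor, total, name):
    # Mirrors the check_multiple guard above: a factor > 1 must divide the
    # fixed grid size (e.g. 4096 channels) with no remainder.
    if factor > 1 and total % factor != 0:
        raise ValueError(f"`{name}`={factor} does not divide {total} evenly.")

check_divides(4, 4096, "rebin_energy")        # ok: 4096 / 4 = 1024 channels
try:
    check_divides(3, 4096, "rebin_energy")    # 4096 % 3 != 0
except ValueError as err:
    print(err)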
35,507 |
def initialize_v_cruise(v_ego, buttonEvents, v_cruise_last):
for b in buttonEvents:
# 250kph or above probably means we never had a set speed
if b.type in [car.CarState.ButtonEvent.Type.accelCruise, car.CarState.ButtonEvent.Type.resumeCruise] and v_cruise_last < 250:
return v_cruise_last
return int(round(clip(v_ego * CV.MS_TO_KPH, V_CRUISE_ENABLE_MIN, V_CRUISE_MAX)))
|
def initialize_v_cruise(v_ego, buttonEvents, v_cruise_last):
for b in buttonEvents:
# 250kph or above probably means we never had a set speed
if b.type in (car.CarState.ButtonEvent.Type.accelCruise, car.CarState.ButtonEvent.Type.resumeCruise) and v_cruise_last < 250:
return v_cruise_last
return int(round(clip(v_ego * CV.MS_TO_KPH, V_CRUISE_ENABLE_MIN, V_CRUISE_MAX)))
|
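The only change in this pair is the container used for the fixed membership test; a tuple literal is the idiomatic, immutable choice for a constant set of alternatives. A tiny illustration with made-up button names:

button_type = "accelCruise"
if button_type in ("accelCruise", "resumeCruise"):   # tuple literal for fixed alternatives
    print("set-speed button pressed")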
36,010 |
def migration_dbgroup_type_string(data):
"""Apply migration 0044 - REV. 1.0.44
Rename the `type_string` columns of all `Group` instances.
"""
mapping = {
'user': 'core',
'data.upf.family': 'core.upf',
'auto.import': 'core.import',
'auto.run': 'core.auto',
}
for _, attributes in data.get('export_data', {}).get('Group', {}).items():
for old, new in mapping.items():
if attributes['type_string'] == old:
attributes['type_string'] = new
|
def migration_dbgroup_type_string(data):
"""Apply migration 0044 - REV. 1.0.44
Rename the `type_string` columns of all `Group` instances.
"""
mapping = {
'user': 'core',
'data.upf.family': 'core.upf',
'auto.import': 'core.import',
'auto.run': 'core.auto',
}
for attributes in data.get('export_data', {}).get('Group', {}).values():
for old, new in mapping.items():
if attributes['type_string'] == old:
attributes['type_string'] = new
|
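When the dictionary keys are never used, iterating over `.values()` (as the modified version does) drops the throwaway loop variable. A small self-contained sketch of the same rename-in-place pattern on plain dicts with made-up group data:

mapping = {
    'user': 'core',
    'auto.run': 'core.auto',
}
groups = {
    'g1': {'type_string': 'user'},
    'g2': {'type_string': 'auto.run'},
}
# Only the attribute dicts are needed, not their keys.
for attributes in groups.values():
    old = attributes['type_string']
    if old in mapping:
        attributes['type_string'] = mapping[old]
print(groups)   # {'g1': {'type_string': 'core'}, 'g2': {'type_string': 'core.auto'}}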
53,476 |
def integer_sum(a, b):
"""Returns sum of two integers
:param a: first integer
:type a: int
:param b: second integer
:type b: int
:return: sum of parameters a and b
:rtype: int
"""
return a + b
|
def integer_sum(a, b) -> int:
"""Returns sum of two integers
:param a: first integer
:type a: int
:param b: second integer
:type b: int
:return: sum of parameters a and b
"""
return a + b
|
7,431 |
def stain_color_matrix(colors):
"""Creates a stain color matrix for a combination of stains.
This routine knows some common stains, their colors are taken from
other tools implementing stain unmixing, but will likely not exactly
match the colors of the stains in your image. This is because the
color of a stain depends on many factors, including the chemistry,
the microscope light source, and the RGB camera capturing the image.
It is always best to measure your stain colors.
Known stains are:
"Hematoxylin"
"Eosin"
"DAB"
"AEC"
"Alcian Blue"
"Aniline Blue"
"Azocarmine"
"FastBlue"
"FastRed"
"Feulgen"
"Light Green"
"Methyl Blue"
"Methyl Green"
"Orange-G"
"PAS"
"Ponceau Fuchsin"
See separate_stains() and combine_stains().
Parameters
----------
colors : iterable with 1 to 3 elements. Each element must be either a
string for a known stain name (see below) or an RGB triplet in the
form of an iterable.
Returns
-------
out : (..., 3) ndarray
The stain color matrix, an Nx3 matrix, where N is the length of the
input `colors`.
Raises
------
ValueError
If `colors` contains an unknown stain name or an illegal RGB triplet,
or if `colors` is empty or has more than 3 elements.
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
"""
# Following matrices are adapted form the Java code written by G.Landini.
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
# Similar values can be found in CellProfiler:
# https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py
stain_colors = {
"Hematoxylin": (0.650, 0.704, 0.286),
"Eosin": (0.092789, 0.954111, 0.283111),
"DAB": (0.268, 0.570, 0.776),
"AEC": (0.2743, 0.6796, 0.6803),
"Alcian Blue": (0.552556, 0.7544, 0.353744),
"Aniline Blue": (0.853033, 0.508733, 0.112656),
"Azocarmine": (0.09289875, 0.8662008, 0.49098468),
"FastBlue": (0.74890292, 0.60624161, 0.26731082),
"FastRed": (0.21393921, 0.85112669, 0.47794022),
"Feulgen": (0.46420921, 0.83008335, 0.30827187),
"Light Green": (0.94705542, 0.25373821, 0.19650764),
"Methyl Blue": (0.7995107, 0.5913521, 0.10528667),
"Methyl Green": (0.98003, 0.144316, 0.133146),
"Orange-G": (0.10732849, 0.36765403, 0.9237484),
"PAS": (0.175411, 0.972178, 0.154589),
"Ponceau Fuchsin": (0.09997159, 0.73738605, 0.6680326),
}
N = len(colors)
if N < 1 or N > 3:
msg = (f'the input `colors` must have between 1 and 3 elements, got {N}')
raise ValueError(msg)
out = np.zeros((N, 3))
for ii, val in enumerate(colors):
if isinstance(val, str):
if not val in stain_colors:
msg = (f'the input `colors` contains {val}, which I do not recognize as a stain')
raise ValueError(msg)
val = stain_colors[val]
else:
if len(val) != 3 or not all(isinstance(v, float) for v in val):
msg = (f'the input `colors` contains {val}, which is not an RGB triplet')
raise ValueError(msg)
norm = np.linalg.norm(val)
val = [v / norm for v in val]
out[ii, :] = val
return out
|
def stain_color_matrix(colors):
"""Creates a stain color matrix for a combination of stains.
This routine knows some common stains, their colors are taken from
other tools implementing stain unmixing, but will likely not exactly
match the colors of the stains in your image. This is because the
color of a stain depends on many factors, including the chemistry,
the microscope light source, and the RGB camera capturing the image.
It is always best to measure your stain colors.
Known stains are:
"Hematoxylin"
"Eosin"
"DAB"
"AEC"
"Alcian Blue"
"Aniline Blue"
"Azocarmine"
"FastBlue"
"FastRed"
"Feulgen"
"Light Green"
"Methyl Blue"
"Methyl Green"
"Orange-G"
"PAS"
"Ponceau Fuchsin"
See separate_stains() and combine_stains().
Parameters
----------
colors : iterable with 1 to 3 elements. Each element must be either a
string for a known stain name (see below) or an RGB triplet in the
form of an iterable.
Returns
-------
out : (..., 3) ndarray
The stain color matrix, an Nx3 matrix, where N is the length of the
input `colors`.
Raises
------
ValueError
If `colors` contains an unknown stain name or an illegal RGB triplet,
or if `colors` is empty or has more than 3 elements.
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
"""
# Following matrices are adapted from the Java code written by G. Landini.
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
# Similar values can be found in CellProfiler:
# https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py
stain_colors = {
"Hematoxylin": (0.650, 0.704, 0.286),
"Eosin": (0.092789, 0.954111, 0.283111),
"DAB": (0.268, 0.570, 0.776),
"AEC": (0.2743, 0.6796, 0.6803),
"Alcian Blue": (0.552556, 0.7544, 0.353744),
"Aniline Blue": (0.853033, 0.508733, 0.112656),
"Azocarmine": (0.09289875, 0.8662008, 0.49098468),
"FastBlue": (0.74890292, 0.60624161, 0.26731082),
"FastRed": (0.21393921, 0.85112669, 0.47794022),
"Feulgen": (0.46420921, 0.83008335, 0.30827187),
"Light Green": (0.94705542, 0.25373821, 0.19650764),
"Methyl Blue": (0.7995107, 0.5913521, 0.10528667),
"Methyl Green": (0.98003, 0.144316, 0.133146),
"Orange-G": (0.10732849, 0.36765403, 0.9237484),
"PAS": (0.175411, 0.972178, 0.154589),
"Ponceau Fuchsin": (0.09997159, 0.73738605, 0.6680326),
}
N = len(colors)
if N < 1 or N > 3:
msg = (f'the input `colors` must have between 1 and 3 elements, got {N}')
raise ValueError(msg)
out = np.zeros((N, 3))
for ii, val in enumerate(colors):
if isinstance(val, str):
if not val in stain_colors:
msg = (f'the input `colors` contains {val}, which I do not recognize as a stain')
raise ValueError(msg)
val = stain_colors[val]
else:
if len(val) != 3 or not all(isinstance(v, float) for v in val):
msg = (f'the input `colors` contains {val}, which is not an RGB triplet')
raise ValueError(msg)
norm = np.linalg.norm(val)
val = [v / norm for v in val]
out[ii, :] = val
return out
|
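The numerical core of the function above is normalising each RGB stain colour to a unit vector, so every row of the returned matrix has Euclidean length 1. A minimal NumPy sketch of just that step (the triplets are example values, not authoritative stain colours):

import numpy as np

colors = np.array([
    (0.650, 0.704, 0.286),   # Hematoxylin-like triplet
    (0.268, 0.570, 0.776),   # DAB-like triplet
])
# Divide each row by its norm; keepdims=True lets the division broadcast row-wise.
matrix = colors / np.linalg.norm(colors, axis=1, keepdims=True)
print(np.linalg.norm(matrix, axis=1))   # -> [1. 1.]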
42,038 |
def _get_optimization_histories_with_error_bar(
studies: List[Study],
target: Optional[Callable[[FrozenTrial], float]],
target_name: str,
ax: "Axes",
) -> "Axes":
max_trial_number = np.max(
[
trial.number
for study in studies
for trial in study.get_trials(states=(TrialState.COMPLETE,))
]
)
_target: Callable[
[
FrozenTrial,
],
float,
]
if target is None:
def _target(t: FrozenTrial) -> float:
return cast(float, t.value)
else:
_target = target
target_values: List[List[float]] = [[] for _ in range(max_trial_number + 2)]
for study in studies:
trials = study.get_trials(states=(TrialState.COMPLETE,))
for t in trials:
target_values[t.number].append(_target(t))
mean_of_target_values = [np.mean(v) if len(v) > 0 else None for v in target_values]
std_of_target_values = [np.std(v) if len(v) > 0 else None for v in target_values]
trial_numbers = np.arange(max_trial_number + 2)[[v is not None for v in mean_of_target_values]]
means = np.asarray(mean_of_target_values)[trial_numbers]
stds = np.asarray(std_of_target_values)[trial_numbers]
plt.errorbar(
x=trial_numbers,
y=means,
yerr=stds,
capsize=5,
fmt="o",
color="tab:blue",
)
ax.scatter(trial_numbers, means, color="tab:blue", label=target_name)
best_values: List[List[float]] = [[] for _ in range(max_trial_number + 2)]
if target is None:
for study in studies:
trials = study.get_trials(states=(TrialState.COMPLETE,))
if study.direction == StudyDirection.MINIMIZE:
best_vs = np.minimum.accumulate([cast(float, t.values) for t in trials])
else:
best_vs = np.maximum.accumulate([cast(float, t.values) for t in trials])
for i, t in enumerate(trials):
best_values[t.number].append(best_vs[i])
mean_of_best_values = [np.mean(v) if len(v) > 0 else None for v in best_values]
std_of_best_values = [np.std(v) if len(v) > 0 else None for v in best_values]
means = np.asarray(mean_of_best_values)[trial_numbers]
stds = np.asarray(std_of_best_values)[trial_numbers]
ax.plot(trial_numbers, means, color="tab:red", label="Best Value")
ax.fill_between(
x=trial_numbers,
y1=np.array(means - stds, float),
y2=np.array(means + stds, float),
color="tab:red",
alpha=0.4,
)
ax.legend()
return ax
|
def _get_optimization_histories_with_error_bar(
studies: List[Study],
target: Optional[Callable[[FrozenTrial], float]],
target_name: str,
ax: "Axes",
) -> "Axes":
max_trial_number = np.max(
[
trial.number
for study in studies
for trial in study.get_trials(states=(TrialState.COMPLETE,))
]
)
_target: Callable[[FrozenTrial], float]
if target is None:
def _target(t: FrozenTrial) -> float:
return cast(float, t.value)
else:
_target = target
target_values: List[List[float]] = [[] for _ in range(max_trial_number + 2)]
for study in studies:
trials = study.get_trials(states=(TrialState.COMPLETE,))
for t in trials:
target_values[t.number].append(_target(t))
mean_of_target_values = [np.mean(v) if len(v) > 0 else None for v in target_values]
std_of_target_values = [np.std(v) if len(v) > 0 else None for v in target_values]
trial_numbers = np.arange(max_trial_number + 2)[[v is not None for v in mean_of_target_values]]
means = np.asarray(mean_of_target_values)[trial_numbers]
stds = np.asarray(std_of_target_values)[trial_numbers]
plt.errorbar(
x=trial_numbers,
y=means,
yerr=stds,
capsize=5,
fmt="o",
color="tab:blue",
)
ax.scatter(trial_numbers, means, color="tab:blue", label=target_name)
best_values: List[List[float]] = [[] for _ in range(max_trial_number + 2)]
if target is None:
for study in studies:
trials = study.get_trials(states=(TrialState.COMPLETE,))
if study.direction == StudyDirection.MINIMIZE:
best_vs = np.minimum.accumulate([cast(float, t.values) for t in trials])
else:
best_vs = np.maximum.accumulate([cast(float, t.values) for t in trials])
for i, t in enumerate(trials):
best_values[t.number].append(best_vs[i])
mean_of_best_values = [np.mean(v) if len(v) > 0 else None for v in best_values]
std_of_best_values = [np.std(v) if len(v) > 0 else None for v in best_values]
means = np.asarray(mean_of_best_values)[trial_numbers]
stds = np.asarray(std_of_best_values)[trial_numbers]
ax.plot(trial_numbers, means, color="tab:red", label="Best Value")
ax.fill_between(
x=trial_numbers,
y1=np.array(means - stds, float),
y2=np.array(means + stds, float),
color="tab:red",
alpha=0.4,
)
ax.legend()
return ax
|
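The "Best Value" curve above is a running optimum computed with `np.minimum.accumulate` (or `np.maximum.accumulate` when maximising). A short sketch of that cumulative-best computation, independent of Optuna, with invented trial values:

import numpy as np

values = np.array([0.9, 0.7, 0.8, 0.4, 0.6])    # objective value per completed trial
best_so_far = np.minimum.accumulate(values)     # minimisation: best value seen so far
print(best_so_far)                              # [0.9 0.7 0.7 0.4 0.4]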
6,894 |
def upload_from_folder(path, is_private, dropbox_folder, dropbox_client, did_not_upload, error_log):
if not os.path.exists(path):
return
if is_fresh_upload():
response = get_uploaded_files_meta(dropbox_folder, dropbox_client)
else:
response = frappe._dict({"entries": []})
path = text_type(path)
for f in frappe.get_all("File", filters={"is_folder": 0, "is_private": is_private,
"uploaded_to_dropbox": 0}, fields=['file_url', 'name', 'file_name']):
if not f.file_url:
filename = f.filename.rsplit('/',1)[-1]
else:
filename = f.file_url.rsplit('/',1)[-1]
filepath = os.path.join(path, filename)
if filename in ignore_list:
continue
found = False
for file_metadata in response.entries:
try:
if (os.path.basename(filepath) == file_metadata.name
and os.stat(encode(filepath)).st_size == int(file_metadata.size)):
found = True
update_file_dropbox_status(f.name)
break
except Exception:
error_log.append(frappe.get_traceback())
if not found:
try:
upload_file_to_dropbox(filepath, dropbox_folder, dropbox_client)
update_file_dropbox_status(f.name)
except Exception:
did_not_upload.append(filepath)
error_log.append(frappe.get_traceback())
|
def upload_from_folder(path, is_private, dropbox_folder, dropbox_client, did_not_upload, error_log):
if not os.path.exists(path):
return
if is_fresh_upload():
response = get_uploaded_files_meta(dropbox_folder, dropbox_client)
else:
response = frappe._dict({"entries": []})
path = text_type(path)
for f in frappe.get_all("File", filters={"is_folder": 0, "is_private": is_private,
"uploaded_to_dropbox": 0}, fields=['file_url', 'name', 'file_name']):
if not f.file_url:
filename = f.filename.rsplit('/',1)[-1]
else:
filename = f.file_url.rsplit('/', 1)[-1]
filepath = os.path.join(path, filename)
if filename in ignore_list:
continue
found = False
for file_metadata in response.entries:
try:
if (os.path.basename(filepath) == file_metadata.name
and os.stat(encode(filepath)).st_size == int(file_metadata.size)):
found = True
update_file_dropbox_status(f.name)
break
except Exception:
error_log.append(frappe.get_traceback())
if not found:
try:
upload_file_to_dropbox(filepath, dropbox_folder, dropbox_client)
update_file_dropbox_status(f.name)
except Exception:
did_not_upload.append(filepath)
error_log.append(frappe.get_traceback())
|
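Both versions above recover the on-disk filename by taking the last path component of the stored URL with `rsplit('/', 1)[-1]`. A tiny illustration with a made-up private file URL:

file_url = "/private/files/report-2021.pdf"
filename = file_url.rsplit('/', 1)[-1]   # split once from the right, keep the tail
print(filename)                          # report-2021.pdf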
19,626 |
def mk_relative_linux(f, prefix, rpaths=('lib',), method='LIEF'):
'Respects the original values and converts abs to $ORIGIN-relative'
elf = os.path.join(prefix, f)
origin = os.path.dirname(elf)
existing, _, _ = get_rpaths_raw(elf)
patchelf = external.find_executable('patchelf', prefix)
if not patchelf:
print("ERROR :: You should install patchelf, will proceed with LIEF for {} (was {})".format(elf, method))
method = 'LIEF'
else:
try:
existing_pe = check_output([patchelf, '--print-rpath', elf]).decode('utf-8').splitlines()[0]
except CalledProcessError:
print("ERROR :: `patchelf --print-rpath` failed for {}, will proceed with LIEF (was {})".format(
elf, method))
method = 'LIEF'
else:
existing_pe = existing_pe.split(os.pathsep)
if have_lief:
existing2, _, _ = get_rpaths_raw(elf)
if [existing_pe] != existing2:
print('ERROR :: get_rpaths_raw()={} and patchelf={} disagree for {} :: '.format(
existing2, [existing_pe], elf))
# Use LIEF if method is LIEF to get the initial value?
existing = existing_pe.split(os.pathsep)
new = []
for old in existing:
if old.startswith('$ORIGIN'):
new.append(old)
elif old.startswith('/'):
# Test if this absolute path is outside of prefix. That is fatal.
rp = relpath(old, prefix)
if rp.startswith('..' + os.sep):
print('Warning: rpath {0} is outside prefix {1} (removing it)'.format(old, prefix))
else:
rp = '$ORIGIN/' + relpath(old, origin)
if rp not in new:
new.append(rp)
# Ensure that the asked-for paths are also in new.
for rpath in rpaths:
if rpath != '':
if not rpath.startswith('/'):
# IMHO utils.relative shouldn't exist, but I am too paranoid to remove
# it, so instead, make sure that what I think it should be replaced by
# gives the same result and assert if not. Yeah, I am a chicken.
rel_ours = os.path.normpath(utils.relative(f, rpath))
rel_stdlib = os.path.normpath(os.path.relpath(rpath, os.path.dirname(f)))
if not rel_ours == rel_stdlib:
raise ValueError('utils.relative {0} and relpath {1} disagree for {2}, {3}'.format(
rel_ours, rel_stdlib, f, rpath))
rpath = '$ORIGIN/' + rel_stdlib
if rpath not in new:
new.append(rpath)
rpath = ':'.join(new)
# check_binary_patchers(elf, prefix, rpath)
if method.upper() == 'LIEF' or not patchelf:
set_rpath(old_matching='*', new_rpath=rpath, file=elf)
else:
call([patchelf, '--force-rpath', '--set-rpath', rpath, elf])
|
def mk_relative_linux(f, prefix, rpaths=('lib',), method='LIEF'):
'Respects the original values and converts abs to $ORIGIN-relative'
elf = os.path.join(prefix, f)
origin = os.path.dirname(elf)
existing, _, _ = get_rpaths_raw(elf)
patchelf = external.find_executable('patchelf', prefix)
if not patchelf:
print("WARNING :: You should install patchelf, will proceed with LIEF for {} (was {})".format(elf, method))
method = 'LIEF'
else:
try:
existing_pe = check_output([patchelf, '--print-rpath', elf]).decode('utf-8').splitlines()[0]
except CalledProcessError:
print("ERROR :: `patchelf --print-rpath` failed for {}, will proceed with LIEF (was {})".format(
elf, method))
method = 'LIEF'
else:
existing_pe = existing_pe.split(os.pathsep)
if have_lief:
existing2, _, _ = get_rpaths_raw(elf)
if [existing_pe] != existing2:
print('ERROR :: get_rpaths_raw()={} and patchelf={} disagree for {} :: '.format(
existing2, [existing_pe], elf))
# Use LIEF if method is LIEF to get the initial value?
existing = existing_pe.split(os.pathsep)
new = []
for old in existing:
if old.startswith('$ORIGIN'):
new.append(old)
elif old.startswith('/'):
# Test if this absolute path is outside of prefix. That is fatal.
rp = relpath(old, prefix)
if rp.startswith('..' + os.sep):
print('Warning: rpath {0} is outside prefix {1} (removing it)'.format(old, prefix))
else:
rp = '$ORIGIN/' + relpath(old, origin)
if rp not in new:
new.append(rp)
# Ensure that the asked-for paths are also in new.
for rpath in rpaths:
if rpath != '':
if not rpath.startswith('/'):
# IMHO utils.relative shouldn't exist, but I am too paranoid to remove
# it, so instead, make sure that what I think it should be replaced by
# gives the same result and assert if not. Yeah, I am a chicken.
rel_ours = os.path.normpath(utils.relative(f, rpath))
rel_stdlib = os.path.normpath(os.path.relpath(rpath, os.path.dirname(f)))
if not rel_ours == rel_stdlib:
raise ValueError('utils.relative {0} and relpath {1} disagree for {2}, {3}'.format(
rel_ours, rel_stdlib, f, rpath))
rpath = '$ORIGIN/' + rel_stdlib
if rpath not in new:
new.append(rpath)
rpath = ':'.join(new)
# check_binary_patchers(elf, prefix, rpath)
if method.upper() == 'LIEF' or not patchelf:
set_rpath(old_matching='*', new_rpath=rpath, file=elf)
else:
call([patchelf, '--force-rpath', '--set-rpath', rpath, elf])
|
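The key transformation above rewrites an absolute RPATH entry that lies inside the install prefix as a `$ORIGIN`-relative entry, and drops entries that point outside the prefix. A self-contained sketch of that conversion; the paths are invented for illustration:

import os
from os.path import relpath

prefix = "/opt/pkg"
elf = "/opt/pkg/bin/tool"        # binary whose RPATH is being rewritten
old_rpath = "/opt/pkg/lib"       # absolute entry found in the binary
origin = os.path.dirname(elf)

rp = relpath(old_rpath, prefix)
if rp.startswith(".." + os.sep):
    print(f"Warning: rpath {old_rpath} is outside prefix {prefix} (removing it)")
else:
    new_entry = "$ORIGIN/" + relpath(old_rpath, origin)
    print(new_entry)             # $ORIGIN/../lib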
31,173 |
def get_endpoints_command(client, args):
page_number = arg_to_int(
arg=args.get('page'),
arg_name='Failed to parse "page". Must be a number.',
required=True
)
limit = arg_to_int(
arg=args.get('limit'),
arg_name='Failed to parse "limit". Must be a number.',
required=True
)
if list(args.keys()) == ['limit', 'page', 'sort_order']:
endpoints = client.get_endpoints(page_number=page_number, limit=limit, no_filter=True)
else:
endpoint_id_list = argToList(args.get('endpoint_id_list'))
dist_name = argToList(args.get('dist_name'))
ip_list = argToList(args.get('ip_list'))
group_name = argToList(args.get('group_name'))
platform = argToList(args.get('platform'))
alias_name = argToList(args.get('alias_name'))
isolate = args.get('isolate')
hostname = argToList(args.get('hostname'))
first_seen_gte = arg_to_timestamp(
arg=args.get('first_seen_gte'),
arg_name='first_seen_gte'
)
first_seen_lte = arg_to_timestamp(
arg=args.get('first_seen_lte'),
arg_name='first_seen_lte'
)
last_seen_gte = arg_to_timestamp(
arg=args.get('last_seen_gte'),
arg_name='last_seen_gte'
)
last_seen_lte = arg_to_timestamp(
arg=args.get('last_seen_lte'),
arg_name='last_seen_lte'
)
sort_by_first_seen = args.get('sort_by_first_seen')
sort_by_last_seen = args.get('sort_by_last_seen')
endpoints = client.get_endpoints(
endpoint_id_list=endpoint_id_list,
dist_name=dist_name,
ip_list=ip_list,
group_name=group_name,
platform=platform,
alias_name=alias_name,
isolate=isolate,
hostname=hostname,
page_number=page_number,
limit=limit,
first_seen_gte=first_seen_gte,
first_seen_lte=first_seen_lte,
last_seen_gte=last_seen_gte,
last_seen_lte=last_seen_lte,
sort_by_first_seen=sort_by_first_seen,
sort_by_last_seen=sort_by_last_seen
)
context = {
f'{INTEGRATION_CONTEXT_BRAND}.Endpoint(val.endpoint_id == obj.endpoint_id)': endpoints,
'Endpoint(val.ID == obj.ID)': return_endpoint_standard_context(endpoints)
}
account_context = create_account_context(endpoints)
if account_context:
context[Common.Account.CONTEXT_PATH] = account_context
return (
tableToMarkdown('Endpoints', endpoints),
context,
endpoints
)
|
def get_endpoints_command(client, args):
page_number = arg_to_int(
arg=args.get('page'),
arg_name='Failed to parse "page". Must be a number.',
required=True
)
limit = arg_to_int(
arg=args.get('limit'),
arg_name='Failed to parse "limit". Must be a number.',
required=True
)
if list(args.keys()) == ['limit', 'page', 'sort_order']:
endpoints = client.get_endpoints(page_number=page_number, limit=limit, no_filter=True)
else:
endpoint_id_list = argToList(args.get('endpoint_id_list'))
dist_name = argToList(args.get('dist_name'))
ip_list = argToList(args.get('ip_list'))
group_name = argToList(args.get('group_name'))
platform = argToList(args.get('platform'))
alias_name = argToList(args.get('alias_name'))
isolate = args.get('isolate')
hostname = argToList(args.get('hostname'))
first_seen_gte = arg_to_timestamp(
arg=args.get('first_seen_gte'),
arg_name='first_seen_gte'
)
first_seen_lte = arg_to_timestamp(
arg=args.get('first_seen_lte'),
arg_name='first_seen_lte'
)
last_seen_gte = arg_to_timestamp(
arg=args.get('last_seen_gte'),
arg_name='last_seen_gte'
)
last_seen_lte = arg_to_timestamp(
arg=args.get('last_seen_lte'),
arg_name='last_seen_lte'
)
sort_by_first_seen = args.get('sort_by_first_seen')
sort_by_last_seen = args.get('sort_by_last_seen')
endpoints = client.get_endpoints(
endpoint_id_list=endpoint_id_list,
dist_name=dist_name,
ip_list=ip_list,
group_name=group_name,
platform=platform,
alias_name=alias_name,
isolate=isolate,
hostname=hostname,
page_number=page_number,
limit=limit,
first_seen_gte=first_seen_gte,
first_seen_lte=first_seen_lte,
last_seen_gte=last_seen_gte,
last_seen_lte=last_seen_lte,
sort_by_first_seen=sort_by_first_seen,
sort_by_last_seen=sort_by_last_seen
)
context = {
f'{INTEGRATION_CONTEXT_BRAND}.Endpoint(val.endpoint_id == obj.endpoint_id)': endpoints,
Common.Endpoint.CONTEXT_PATH: return_endpoint_standard_context(endpoints),
}
account_context = create_account_context(endpoints)
if account_context:
context[Common.Account.CONTEXT_PATH] = account_context
return (
tableToMarkdown('Endpoints', endpoints),
context,
endpoints
)
|
39,913 |
def test_ursula_and_local_keystore_signer_integration(click_runner,
custom_filepath,
stakeholder_configuration_file_location,
custom_config_filepath,
manual_staker,
stake_value,
token_economics,
worker_account,
worker_address,
mocker,
testerchain):
#
# Stakeholder Steps
#
init_args = ('stake', 'init-stakeholder',
'--config-root', custom_filepath,
'--provider', TEST_PROVIDER_URI,
'--network', TEMPORARY_DOMAIN)
click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False)
stake_args = ('stake', 'create',
'--config-file', stakeholder_configuration_file_location,
'--staking-address', manual_staker,
'--value', stake_value.to_tokens(),
'--lock-periods', token_economics.minimum_locked_periods,
'--force')
    # TODO: Is this test writing to the default system directory and ignoring updates to the passed filepath?
user_input = f'0\n' + f'{INSECURE_DEVELOPMENT_PASSWORD}\n' + f'Y\n'
click_runner.invoke(nucypher_cli, stake_args, input=user_input, catch_exceptions=False)
init_args = ('stake', 'set-worker',
'--config-file', stakeholder_configuration_file_location,
'--staking-address', manual_staker,
'--worker-address', worker_address,
'--force')
user_input = INSECURE_DEVELOPMENT_PASSWORD
click_runner.invoke(nucypher_cli, init_args, input=user_input, catch_exceptions=False)
#
# Worker Steps
#
# Good signer...
pre_config_signer = KeystoreSigner.from_signer_uri(uri=MOCK_SIGNER_URI)
assert worker_account.address in pre_config_signer.accounts
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--worker-address', worker_account.address,
'--config-root', custom_filepath,
'--provider', TEST_PROVIDER_URI,
'--rest-host', MOCK_IP_ADDRESS,
'--rest-port', MOCK_URSULA_STARTING_PORT,
# The bit were' testing for here
'--signer', MOCK_SIGNER_URI)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=CLI_ENV)
assert result.exit_code == 0, result.stdout
# Inspect the configuration file for the signer URI
with open(custom_config_filepath, 'r') as config_file:
raw_config_data = config_file.read()
config_data = json.loads(raw_config_data)
assert config_data['signer_uri'] == MOCK_SIGNER_URI,\
"Keystore URI was not correctly included in configuration file"
# Recreate a configuration with the signer URI preserved
ursula_config = UrsulaConfiguration.from_configuration_file(custom_config_filepath)
assert ursula_config.signer_uri == MOCK_SIGNER_URI
# Mock decryption of web3 client keyring
mocker.patch.object(Account, 'decrypt', return_value=worker_account.privateKey)
ursula_config.attach_keyring(checksum_address=worker_account.address)
ursula_config.keyring.unlock(password=INSECURE_DEVELOPMENT_PASSWORD)
    # Produce an ursula with a Keystore signer correctly derived from the signer URI, and don't do anything else!
mocker.patch.object(StakeList, 'refresh', autospec=True)
ursula = ursula_config.produce(client_password=INSECURE_DEVELOPMENT_PASSWORD,
block_until_ready=False)
# Verify the keystore path is still preserved
assert isinstance(ursula.signer, KeystoreSigner)
assert ursula.signer.path == Path(MOCK_KEYSTORE_PATH)
# Show that we can produce the exact same signer as pre-config...
assert pre_config_signer.path == ursula.signer.path
    # ...and that transactions are signed by the keystore signer
receipt = ursula.confirm_activity()
transaction_data = testerchain.client.w3.eth.getTransaction(receipt['transactionHash'])
assert transaction_data['from'] == worker_account.address
|
def test_ursula_and_local_keystore_signer_integration(click_runner,
custom_filepath,
stakeholder_configuration_file_location,
custom_config_filepath,
manual_staker,
stake_value,
token_economics,
worker_account,
worker_address,
mocker,
testerchain):
#
# Stakeholder Steps
#
init_args = ('stake', 'init-stakeholder',
'--config-root', custom_filepath,
'--provider', TEST_PROVIDER_URI,
'--network', TEMPORARY_DOMAIN)
click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False)
stake_args = ('stake', 'create',
'--config-file', stakeholder_configuration_file_location,
'--staking-address', manual_staker,
'--value', stake_value.to_tokens(),
'--lock-periods', token_economics.minimum_locked_periods,
'--force')
    # TODO: Is this test writing to the default system directory and ignoring updates to the passed filepath?
user_input = f'0\n' + f'{INSECURE_DEVELOPMENT_PASSWORD}\n' + f'Y\n'
click_runner.invoke(nucypher_cli, stake_args, input=user_input, catch_exceptions=False)
init_args = ('stake', 'set-worker',
'--config-file', stakeholder_configuration_file_location,
'--staking-address', manual_staker,
'--worker-address', worker_address,
'--force')
user_input = INSECURE_DEVELOPMENT_PASSWORD
click_runner.invoke(nucypher_cli, init_args, input=user_input, catch_exceptions=False)
#
# Worker Steps
#
# Good signer...
pre_config_signer = KeystoreSigner.from_signer_uri(uri=MOCK_SIGNER_URI)
assert worker_account.address in pre_config_signer.accounts
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--worker-address', worker_account.address,
'--config-root', custom_filepath,
'--provider', TEST_PROVIDER_URI,
'--rest-host', MOCK_IP_ADDRESS,
'--rest-port', MOCK_URSULA_STARTING_PORT,
# The bit we're testing for here
'--signer', MOCK_SIGNER_URI)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=CLI_ENV)
assert result.exit_code == 0, result.stdout
# Inspect the configuration file for the signer URI
with open(custom_config_filepath, 'r') as config_file:
raw_config_data = config_file.read()
config_data = json.loads(raw_config_data)
assert config_data['signer_uri'] == MOCK_SIGNER_URI,\
"Keystore URI was not correctly included in configuration file"
# Recreate a configuration with the signer URI preserved
ursula_config = UrsulaConfiguration.from_configuration_file(custom_config_filepath)
assert ursula_config.signer_uri == MOCK_SIGNER_URI
# Mock decryption of web3 client keyring
mocker.patch.object(Account, 'decrypt', return_value=worker_account.privateKey)
ursula_config.attach_keyring(checksum_address=worker_account.address)
ursula_config.keyring.unlock(password=INSECURE_DEVELOPMENT_PASSWORD)
    # Produce an ursula with a Keystore signer correctly derived from the signer URI, and don't do anything else!
mocker.patch.object(StakeList, 'refresh', autospec=True)
ursula = ursula_config.produce(client_password=INSECURE_DEVELOPMENT_PASSWORD,
block_until_ready=False)
# Verify the keystore path is still preserved
assert isinstance(ursula.signer, KeystoreSigner)
assert ursula.signer.path == Path(MOCK_KEYSTORE_PATH)
# Show that we can produce the exact same signer as pre-config...
assert pre_config_signer.path == ursula.signer.path
    # ...and that transactions are signed by the keystore signer
receipt = ursula.confirm_activity()
transaction_data = testerchain.client.w3.eth.getTransaction(receipt['transactionHash'])
assert transaction_data['from'] == worker_account.address
|
58,839 |
def _exec_fftn(a, direction, value_type, norm, axes, overwrite_x,
plan=None, out=None, out_size=None):
fft_type = _convert_fft_type(a.dtype, value_type)
if a.flags.c_contiguous:
order = 'C'
elif a.flags.f_contiguous:
order = 'F'
else:
raise ValueError('a must be contiguous')
if (value_type == 'C2R' and not overwrite_x and
10010 <= cupy.cuda.runtime.runtimeGetVersion()):
# The input array may be modified in CUDA 10.1 and above.
# See #3763 for the discussion.
a = a.copy()
elif cupy.cuda.runtime.is_hip and value_type != 'C2C':
# hipFFT's R2C would overwrite input
# hipFFT's C2R PlanNd is actually not in use so it's fine here
a = a.copy()
# plan search precedence:
# 1. plan passed in as an argument
# 2. plan as context manager
# 3. cached plan
# 4. create a new one
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
plan = curr_plan
# don't check repeated usage; it's done in _default_fft_func()
if plan is None:
# search from cache, and generate a plan if not found
plan = _get_cufft_plan_nd(a.shape, fft_type, axes=axes, order=order,
out_size=out_size)
else:
if not isinstance(plan, cufft.PlanNd):
raise ValueError('expected plan to have type cufft.PlanNd')
if order != plan.order:
raise ValueError('array orders mismatch (plan: {}, input: {})'
.format(plan.order, order))
if a.flags.c_contiguous:
expected_shape = [a.shape[ax] for ax in axes]
if value_type == 'C2R':
expected_shape[-1] = out_size
else:
# plan.shape will be reversed for Fortran-ordered inputs
expected_shape = [a.shape[ax] for ax in axes[::-1]]
# TODO(leofang): modify the shape for C2R
expected_shape = tuple(expected_shape)
if expected_shape != plan.shape:
raise ValueError(
'The cuFFT plan and a.shape do not match: '
'plan.shape = {}, expected_shape={}, a.shape = {}'.format(
plan.shape, expected_shape, a.shape))
if fft_type != plan.fft_type:
raise ValueError('cuFFT plan dtype mismatch.')
if value_type != 'C2C':
if axes[-1] != plan.last_axis:
raise ValueError('The last axis for R2C/C2R mismatch')
if out_size != plan.last_size:
raise ValueError('The size along the last R2C/C2R axis '
'mismatch')
# TODO(leofang): support in-place transform for R2C/C2R
if overwrite_x and value_type == 'C2C':
out = a
elif out is None:
out = plan.get_output_array(a, order=order)
else:
plan.check_output_array(a, out)
if out.size != 0:
plan.fft(a, out, direction)
# normalize by the product of the shape along the transformed axes
arr = a if fft_type in (cufft.CUFFT_R2C, cufft.CUFFT_D2Z) else out
sz = _prod([arr.shape[ax] for ax in axes])
if norm == 'backward':
if direction == cufft.CUFFT_INVERSE:
out /= sz
elif norm == 'ortho':
out /= math.sqrt(sz)
elif norm == 'forward':
if direction == cufft.CUFFT_FORWARD:
out /= sz
return out
|
def _exec_fftn(a, direction, value_type, norm, axes, overwrite_x,
plan=None, out=None, out_size=None):
fft_type = _convert_fft_type(a.dtype, value_type)
if a.flags.c_contiguous:
order = 'C'
elif a.flags.f_contiguous:
order = 'F'
else:
raise ValueError('a must be contiguous')
if (value_type == 'C2R' and not overwrite_x and
10010 <= cupy.cuda.runtime.runtimeGetVersion()):
# The input array may be modified in CUDA 10.1 and above.
# See #3763 for the discussion.
a = a.copy()
elif cupy.cuda.runtime.is_hip and value_type != 'C2C':
# hipFFT's R2C would overwrite input
# hipFFT's C2R PlanNd is actually not in use so it's fine here
a = a.copy()
# plan search precedence:
# 1. plan passed in as an argument
# 2. plan as context manager
# 3. cached plan
# 4. create a new one
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
plan = curr_plan
# don't check repeated usage; it's done in _default_fft_func()
if plan is None:
# search from cache, and generate a plan if not found
plan = _get_cufft_plan_nd(a.shape, fft_type, axes=axes, order=order,
out_size=out_size)
else:
if not isinstance(plan, cufft.PlanNd):
raise ValueError('expected plan to have type cufft.PlanNd')
if order != plan.order:
raise ValueError('array orders mismatch (plan: {}, input: {})'
.format(plan.order, order))
if a.flags.c_contiguous:
expected_shape = [a.shape[ax] for ax in axes]
if value_type == 'C2R':
expected_shape[-1] = out_size
else:
# plan.shape will be reversed for Fortran-ordered inputs
expected_shape = [a.shape[ax] for ax in axes[::-1]]
# TODO(leofang): modify the shape for C2R
expected_shape = tuple(expected_shape)
if expected_shape != plan.shape:
raise ValueError(
'The cuFFT plan and a.shape do not match: '
'plan.shape = {}, expected_shape={}, a.shape = {}'.format(
plan.shape, expected_shape, a.shape))
if fft_type != plan.fft_type:
raise ValueError('cuFFT plan dtype mismatch.')
if value_type != 'C2C':
if axes[-1] != plan.last_axis:
raise ValueError('The last axis for R2C/C2R mismatch')
if out_size != plan.last_size:
raise ValueError('The size along the last R2C/C2R axis '
'mismatch')
# TODO(leofang): support in-place transform for R2C/C2R
if overwrite_x and value_type == 'C2C':
out = a
elif out is None:
out = plan.get_output_array(a, order=order)
else:
plan.check_output_array(a, out)
if out.size != 0:
plan.fft(a, out, direction)
# normalize by the product of the shape along the transformed axes
arr = a if fft_type in (cufft.CUFFT_R2C, cufft.CUFFT_D2Z) else out
sz = _prod([arr.shape[ax] for ax in axes])
if norm == 'backward' and direction == cufft.CUFFT_INVERSE:
out /= sz
elif norm == 'ortho':
out /= math.sqrt(sz)
elif norm == 'forward':
if direction == cufft.CUFFT_FORWARD:
out /= sz
return out
|
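The normalisation branch at the end follows the usual FFT `norm` conventions: 'backward' scales only the inverse transform by 1/n, 'ortho' scales both directions by 1/sqrt(n), and 'forward' scales only the forward transform by 1/n. A quick NumPy check of the 'ortho' convention:

import numpy as np

x = np.random.rand(8)
unscaled = np.fft.fft(x)                  # default 'backward': forward transform is unscaled
ortho = np.fft.fft(x, norm="ortho")       # scaled by 1/sqrt(n)
assert np.allclose(ortho, unscaled / np.sqrt(len(x)))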
51,415 |
def arrayToQPath(x, y, connect='all'):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4)
## 0(i4) x(f8) y(f8) <-- 0 means this vertex does not connect
## 1(i4) x(f8) y(f8) <-- 1 means this vertex connects to the previous vertex
## ...
## cStart(i4) fillRule(i4)
##
## see: https://github.com/qt/qtbase/blob/dev/src/gui/painting/qpainterpath.cpp
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('c', '>i4'), ('x', '>f8'), ('y', '>f8')])
# write first two integers
byteview = arr.view(dtype=np.ubyte)
byteview[:16] = 0
byteview.data[16:20] = struct.pack('>i', n)
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
# inf/nans completely prevent the plot from being displayed starting on
# Qt version 5.12.3; these must now be manually cleaned out.
isfinite = None
qtver = [int(x) for x in QtVersion.split('.')]
if qtver >= [5, 12, 3]:
isfinite = np.isfinite(x) & np.isfinite(y)
if not np.all(isfinite):
# credit: Divakar https://stackoverflow.com/a/41191127/643629
mask = ~isfinite
idx = np.arange(len(x))
idx[mask] = -1
np.maximum.accumulate(idx, out=idx)
first = np.searchsorted(idx, 0)
idx[:first] = first
arr[1:-1] = arr[1:-1][idx]
# decide which points are connected by lines
if eq(connect, 'all'):
arr[1:-1]['c'] = 1
elif eq(connect, 'pairs'):
arr[1:-1]['c'][::2] = 0
arr[1:-1]['c'][1::2] = 1 # connect every 2nd point to every 1st one
elif eq(connect, 'finite'):
        # Let's call a point with either x or y being nan an invalid point.
# A point will anyway not connect to an invalid point regardless of the
# 'c' value of the invalid point. Therefore, we should set 'c' to 0 for
# the next point of an invalid point.
if isfinite is None:
isfinite = np.isfinite(x) & np.isfinite(y)
arr[2:]['c'] = isfinite
elif isinstance(connect, np.ndarray):
arr[1:-1]['c'] = connect
else:
raise Exception('connect argument must be "all", "pairs", "finite", or array')
arr[1]['c'] = 0 # the first vertex has no previous vertex to connect
byteview.data[-20:-16] = struct.pack('>i', 0) # cStart
byteview.data[-16:-12] = struct.pack('>i', 0) # fillRule (Qt.OddEvenFill)
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[16:-12] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
ds = QtCore.QDataStream(buf)
ds >> path
return path
|
def arrayToQPath(x, y, connect='all'):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4)
## 0(i4) x(f8) y(f8) <-- 0 means this vertex does not connect
## 1(i4) x(f8) y(f8) <-- 1 means this vertex connects to the previous vertex
## ...
## cStart(i4) fillRule(i4)
##
## see: https://github.com/qt/qtbase/blob/dev/src/gui/painting/qpainterpath.cpp
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('c', '>i4'), ('x', '>f8'), ('y', '>f8')])
# write first two integers
byteview = arr.view(dtype=np.ubyte)
byteview[:16] = 0
byteview.data[16:20] = struct.pack('>i', n)
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
# inf/nans completely prevent the plot from being displayed starting on
# Qt version 5.12.3; these must now be manually cleaned out.
isfinite = None
qtver = [int(x) for x in QtVersion.split('.')]
if qtver >= [5, 12, 3]:
isfinite = np.isfinite(x) & np.isfinite(y)
if not np.all(isfinite):
# credit: Divakar https://stackoverflow.com/a/41191127/643629
mask = ~isfinite
idx = np.arange(len(x))
idx[mask] = -1
np.maximum.accumulate(idx, out=idx)
first = np.searchsorted(idx, 0)
if first < len(x):
# Replace all non-finite entries from beginning of arr with the first finite one
idx[:first] = first
arr[1:-1] = arr[1:-1][idx]
# decide which points are connected by lines
if eq(connect, 'all'):
arr[1:-1]['c'] = 1
elif eq(connect, 'pairs'):
arr[1:-1]['c'][::2] = 0
arr[1:-1]['c'][1::2] = 1 # connect every 2nd point to every 1st one
elif eq(connect, 'finite'):
        # Let's call a point with either x or y being nan an invalid point.
# A point will anyway not connect to an invalid point regardless of the
# 'c' value of the invalid point. Therefore, we should set 'c' to 0 for
# the next point of an invalid point.
if isfinite is None:
isfinite = np.isfinite(x) & np.isfinite(y)
arr[2:]['c'] = isfinite
elif isinstance(connect, np.ndarray):
arr[1:-1]['c'] = connect
else:
raise Exception('connect argument must be "all", "pairs", "finite", or array')
arr[1]['c'] = 0 # the first vertex has no previous vertex to connect
byteview.data[-20:-16] = struct.pack('>i', 0) # cStart
byteview.data[-16:-12] = struct.pack('>i', 0) # fillRule (Qt.OddEvenFill)
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[16:-12] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
ds = QtCore.QDataStream(buf)
ds >> path
return path
|
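The non-finite handling above relies on the forward-fill-by-index trick credited to Divakar in the comments: mark invalid positions with -1 in an index array, run `np.maximum.accumulate` so each bad position inherits the last good index, then backfill any leading gap. A stand-alone sketch of the same trick on a 1-D array:

import numpy as np

x = np.array([np.nan, 1.0, np.nan, 3.0, np.nan])
mask = ~np.isfinite(x)
idx = np.arange(len(x))
idx[mask] = -1
np.maximum.accumulate(idx, out=idx)   # each slot now holds the last finite index seen
first = np.searchsorted(idx, 0)       # leading invalid slots still hold -1
if first < len(x):
    idx[:first] = first               # point the leading gap at the first finite sample
print(x[idx])                         # [1. 1. 1. 3. 3.]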
14,166 |
def test_buff_big_endian():
orig_str = "011011001001"
orig_bytes = b'\x06\xC9' # padding is the high bits of the first byte
v = BinaryValue(value=orig_str, n_bits=12, bigEndian=True)
assert v.buff == orig_bytes
# should be unchanged
v.buff = orig_bytes
assert v.buff == orig_bytes
assert v.binstr == orig_str
# extra bits are stripped
v.buff = b'\xF6\xC9'
assert v.buff == orig_bytes
assert v.binstr == orig_str
|
def test_buff_big_endian():
orig_str = "011011001001"
orig_bytes = b'\x06\xC9' # padding is the high bits of the first byte
v = BinaryValue(value=orig_str, n_bits=12, bigEndian=True)
assert v.buff == orig_bytes
# should be unchanged
v.buff = orig_bytes
assert v.buff == orig_bytes
assert v.binstr == orig_str
# extra bits are stripped because they don't fit into the 12 bits
v.buff = b'\xF6\xC9'
assert v.buff == orig_bytes
assert v.binstr == orig_str
|
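The pair above hinges on how a 12-bit binary string maps onto two bytes, with the padding sitting in the high bits of the first byte; a small standard-library check of that arithmetic (independent of cocotb's BinaryValue) looks like this:
# verify that "011011001001" packs to b'\x06\xc9' and that high bits beyond 12 are dropped
bits = "011011001001"
value = int(bits, 2)                   # 1737 == 0x6C9
assert value.to_bytes(2, "big") == b"\x06\xc9"
masked = int.from_bytes(b"\xf6\xc9", "big") & ((1 << 12) - 1)   # strip bits that don't fit
assert masked == value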
37,173 |
def pad(schedule: Schedule,
channels: Optional[Iterable[Channel]] = None,
until: Optional[int] = None) -> Schedule:
"""Pad the input ``Schedule`` with ``Delay`` s on all unoccupied timeslots until ``until``
if it is provided, otherwise until ``schedule.duration``.
Args:
schedule: Schedule to pad.
channels: Channels to pad. Defaults to all channels in ``schedule`` if not provided.
If the supplied channel is not a member of ``schedule``, it will be added.
until: Time to pad until. Defaults to ``schedule.duration`` if not provided.
Returns:
The padded schedule.
"""
until = until or schedule.duration
channels = channels or schedule.channels
occupied_channels = schedule.channels
unoccupied_channels = set(channels) - set(occupied_channels)
empty_timeslot_collection = schedule.timeslots.complement(until)
for channel in channels:
for timeslot in empty_timeslot_collection.ch_timeslots(channel):
schedule |= Delay(timeslot.duration)(timeslot.channel).shift(timeslot.start)
for channel in unoccupied_channels:
schedule |= Delay(until)(channel)
return schedule
|
def pad(schedule: Schedule,
channels: Optional[Iterable[Channel]] = None,
until: Optional[int] = None) -> Schedule:
"""Pad the input ``Schedule`` with ``Delay``\s on all unoccupied timeslots until ``until``
if it is provided, otherwise until ``schedule.duration``.
Args:
schedule: Schedule to pad.
channels: Channels to pad. Defaults to all channels in ``schedule`` if not provided.
If the supplied channel is not a member of ``schedule``, it will be added.
until: Time to pad until. Defaults to ``schedule.duration`` if not provided.
Returns:
The padded schedule.
"""
until = until or schedule.duration
channels = channels or schedule.channels
occupied_channels = schedule.channels
unoccupied_channels = set(channels) - set(occupied_channels)
empty_timeslot_collection = schedule.timeslots.complement(until)
for channel in channels:
for timeslot in empty_timeslot_collection.ch_timeslots(channel):
schedule |= Delay(timeslot.duration)(timeslot.channel).shift(timeslot.start)
for channel in unoccupied_channels:
schedule |= Delay(until)(channel)
return schedule
|
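The core of pad() is interval arithmetic: take the timeslots already occupied on a channel and fill the complement up to `until` with delays. A plain-Python sketch of that step, with a hypothetical helper name and no Qiskit objects:
def complement_intervals(occupied, until):
    # occupied: iterable of (start, stop) pairs on one channel (hypothetical helper,
    # not part of the function above); returns the gaps that would be padded with delays
    gaps = []
    cursor = 0
    for start, stop in sorted(occupied):
        if start > cursor:
            gaps.append((cursor, start))
        cursor = max(cursor, stop)
    if cursor < until:
        gaps.append((cursor, until))
    return gaps

assert complement_intervals([(2, 4), (6, 7)], until=10) == [(0, 2), (4, 6), (7, 10)]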
24,878 |
def _has_different_parameters(
original: List[nodes.AssignName],
overridden: List[nodes.AssignName],
dummy_parameter_regex: Pattern,
) -> List[str]:
result = []
zipped = zip_longest(original, overridden)
for original_param, overridden_param in zipped:
if not overridden_param:
return ["Number of parameters "]
if not original_param:
try:
overridden_param.parent.default_value(overridden_param.name)
continue
except astroid.NoDefault:
return ["Number of parameters "]
params = (original_param, overridden_param)
# check for the arguments' name
names = [param.name for param in params]
if any(dummy_parameter_regex.match(name) for name in names):
continue
if original_param.name != overridden_param.name:
result.append(
f"Parameter '{original_param.name}' has been renamed "
f"to '{overridden_param.name}' in"
)
return result
|
def _has_different_parameters(
original: List[nodes.AssignName],
overridden: List[nodes.AssignName],
dummy_parameter_regex: Pattern,
) -> List[str]:
result = []
zipped = zip_longest(original, overridden)
for original_param, overridden_param in zipped:
if not overridden_param:
return ["Number of parameters "]
if not original_param:
try:
overridden_param.parent.default_value(overridden_param.name)
continue
except astroid.NoDefault:
return ["Number of parameters "]
params = (original_param, overridden_param)
# check for the arguments' name
names = [param.name for param in (original_param, overridden_param)]
if any(dummy_parameter_regex.match(name) for name in names):
continue
if original_param.name != overridden_param.name:
result.append(
f"Parameter '{original_param.name}' has been renamed "
f"to '{overridden_param.name}' in"
)
return result
|
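The comparison above relies on itertools.zip_longest so that an extra parameter on either side shows up paired with None; a reduced illustration with made-up parameter names and an assumed dummy-name regex:
import re
from itertools import zip_longest

dummy_parameter_regex = re.compile(r"^_")   # assumption: treat underscore-prefixed names as dummies
original = ["self", "x", "_unused"]
overridden = ["self", "y"]
for orig_name, over_name in zip_longest(original, overridden):
    if orig_name is None or over_name is None:
        print("Number of parameters differs")
    elif dummy_parameter_regex.match(orig_name) or dummy_parameter_regex.match(over_name):
        continue
    elif orig_name != over_name:
        print(f"Parameter '{orig_name}' has been renamed to '{over_name}'")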
23,548 |
def output():
"""Creates an context manager corresponding to configured context ouput"""
if( not config_output or config_output == "stdout" ):
return StdOutput()
else:
return open(str( config_output ), "w")
|
def output():
"""Creates a context manager corresponding to configured context ouput"""
if( not config_output or config_output == "stdout" ):
return StdOutput()
else:
return open(str( config_output ), "w")
|
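The point of a dedicated StdOutput class (not shown above) is that callers can use the return value as a context manager in both branches without ever closing sys.stdout; a minimal sketch of that idea, with hypothetical names std_output/open_output:
import sys
from contextlib import contextmanager

@contextmanager
def std_output():
    # hypothetical stand-in for StdOutput: yields stdout and deliberately does not close it
    yield sys.stdout

def open_output(config_output=None):
    if not config_output or config_output == "stdout":
        return std_output()
    return open(str(config_output), "w")

with open_output() as fh:
    fh.write("hello\n")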
8,304 |
def extract_info(file_name, fields):
"""
Read a yaml file and return a dictionary mapping the fields to the
values of those fields in the file.
The returned dictionary will always contain all the provided
fields, mapping any non-existent ones to ''.
Assumes fields are set in a format of:
{'meta': [{'field' : value, 'field2' : value2}]
or in yaml:
meta:
- field: value
field2: value2
If 'meta' is present but not in this format, prints an error
message and raises ParseError.
"""
empty_result = {f: '' for f in fields}
if os.path.isdir(file_name) or not file_name.endswith('.yaml'):
return empty_result
with open(file_name, 'r') as f:
parsed = yaml.safe_load(f)
if not isinstance(parsed, dict):
return empty_result
meta = parsed.get('meta', [{}])
if not (isinstance(meta, list) and
len(meta) == 1 and
isinstance(meta[0], dict)):
print('Error in meta format in %s' % file_name)
print('Meta must be a list containing exactly one dict.')
print('Meta is: %s' % meta)
raise ParseError()
return {field: meta[0].get(field, '') for field in fields}
|
def extract_info(file_name, fields):
"""
Read a yaml file and return a dictionary mapping the fields to the
values of those fields in the file.
The returned dictionary will always contain all the provided
fields, mapping any non-existent ones to ''.
Assumes fields are set in a format of:
{'meta': [{'field' : value, 'field2' : value2}]
or in yaml:
meta:
- field: value
field2: value2
If 'meta' is present but not in this format, prints an error
message and raises ParseError.
"""
empty_result = {f: '' for f in fields}
if os.path.isdir(file_name) or not file_name.endswith('.yaml'):
return empty_result
with open(file_name, 'r') as f:
parsed = yaml.safe_load(f)
if not isinstance(parsed, dict):
return empty_result
meta = parsed.get('meta', [{}])
if not (isinstance(meta, list) and
len(meta) == 1 and
isinstance(meta[0], dict)):
print('Error in meta format in', file_name)
print('Meta must be a list containing exactly one dict.')
print('Meta is: %s' % meta)
raise ParseError()
return {field: meta[0].get(field, '') for field in fields}
|
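A worked example of the field-defaulting behaviour described in the docstring, using an already-parsed dict so it runs without PyYAML; missing fields map to the empty string:
parsed = {"meta": [{"field": "value", "field2": "value2"}]}   # as if loaded via yaml.safe_load
fields = ["field", "field2", "missing"]
meta = parsed.get("meta", [{}])
info = {f: meta[0].get(f, "") for f in fields}
assert info == {"field": "value", "field2": "value2", "missing": ""}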
30,632 |
def fetch_incidents(client, last_run):
""" Callback to fetch incidents periodically """
last_fetch_time = last_run.get('last_fetch', datetime.utcnow().timestamp() - 60)
site, concentrator, map = get_site_params()
params = demisto.params()
tags = params.get('tags') or None
event_types = params.get('event_types')
zone_events = []
if event_types is None or 'zone_event' in event_types:
zone_events = client.get_zone_events(concentrator=concentrator, map=map,
site=site, tags=tags, since=last_fetch_time)
device_events = []
if event_types is None or 'device_event' in event_types:
device_events = client.get_device_events(concentrator=concentrator, map=map,
site=site, tags=tags,
since=last_fetch_time)
events = zone_events + device_events
incidents = []
for event in events:
event_time = int(event['time_s'])
incident = {
'name': event['event_type'],
'occurred': datetime.utcfromtimestamp(event_time).strftime(
DATE_FORMAT),
'rawJSON': json.dumps(event),
}
incidents.append(incident)
if event_time > last_fetch_time:
last_fetch_time = event_time
next_run = {'last_fetch': last_fetch_time}
return next_run, incidents
|
def fetch_incidents(client, last_run):
""" Callback to fetch incidents periodically """
last_fetch_time = last_run.get('last_fetch', datetime.utcnow().timestamp() - 60)
site, concentrator, map = get_site_params()
params = demisto.params()
tags = params.get('tags')
event_types = params.get('event_types')
zone_events = []
if event_types is None or 'zone_event' in event_types:
zone_events = client.get_zone_events(concentrator=concentrator, map=map,
site=site, tags=tags, since=last_fetch_time)
device_events = []
if event_types is None or 'device_event' in event_types:
device_events = client.get_device_events(concentrator=concentrator, map=map,
site=site, tags=tags,
since=last_fetch_time)
events = zone_events + device_events
incidents = []
for event in events:
event_time = int(event['time_s'])
incident = {
'name': event['event_type'],
'occurred': datetime.utcfromtimestamp(event_time).strftime(
DATE_FORMAT),
'rawJSON': json.dumps(event),
}
incidents.append(incident)
if event_time > last_fetch_time:
last_fetch_time = event_time
next_run = {'last_fetch': last_fetch_time}
return next_run, incidents
|
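The fetch loop above follows the usual incremental-fetch pattern: convert each event to an incident and advance last_fetch to the newest event time seen. A self-contained sketch with plain dicts in place of the demisto client (the timestamp format string is an assumption, since DATE_FORMAT is defined elsewhere):
import json
from datetime import datetime, timezone

DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"   # assumption: ISO-like format, not taken from the module above

def to_incident(event):
    occurred = datetime.fromtimestamp(int(event["time_s"]), tz=timezone.utc).strftime(DATE_FORMAT)
    return {"name": event["event_type"], "occurred": occurred, "rawJSON": json.dumps(event)}

events = [{"event_type": "zone_event", "time_s": 1700000000},
          {"event_type": "device_event", "time_s": 1700000100}]
last_fetch = 1699999999
incidents = [to_incident(e) for e in events]
next_run = {"last_fetch": max([last_fetch] + [int(e["time_s"]) for e in events])}
assert next_run == {"last_fetch": 1700000100}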
39,673 |
def main():
module = KatelloEntityAnsibleModule(
entity_spec=dict(
name=dict(required=True),
new_name=dict(),
lifecycle_environment=dict(type='entity', flat_name='environment_id'),
content_view=dict(type='entity', flat_name='content_view_id'),
host_collections=dict(type='entity_list', flat_name='host_collection_ids'),
auto_attach=dict(type='bool'),
release_version=dict(),
service_level=dict(choices=['Self-Support', 'Standard', 'Premium']),
),
argument_spec=dict(
subscriptions=dict(type='list', elements='dict', options=dict(
name=dict(),
pool_id=dict(),
),
required_one_of=[['name', 'pool_id']]
),
content_overrides=dict(type='list', elements='dict', options=dict(
label=dict(required=True),
override=dict(required=True, choices=['enabled', 'disabled']),
)),
state=dict(default='present', choices=['present', 'present_with_defaults', 'absent', 'copied']),
),
required_if=[
['state', 'copied', ['new_name']],
],
)
entity_dict = module.clean_params()
module.connect()
entity_dict['organization'] = module.find_resource_by_name('organizations', entity_dict['organization'], thin=True)
scope = {'organization_id': entity_dict['organization']['id']}
if not module.desired_absent:
if 'lifecycle_environment' in entity_dict:
entity_dict['lifecycle_environment'] = module.find_resource_by_name(
'lifecycle_environments', entity_dict['lifecycle_environment'], params=scope, thin=True)
if 'content_view' in entity_dict:
entity_dict['content_view'] = module.find_resource_by_name('content_views', entity_dict['content_view'], params=scope, thin=True)
entity = module.find_resource_by_name('activation_keys', name=entity_dict['name'], params=scope, failsafe=True)
if module.state == 'copied':
new_entity = module.find_resource_by_name('activation_keys', name=entity_dict['new_name'], params=scope, failsafe=True)
if new_entity is not None:
module.warn("Activation Key '{0}' already exists.".format(entity_dict['new_name']))
module.exit_json(changed=False)
subscriptions = entity_dict.pop('subscriptions', None)
content_overrides = entity_dict.pop('content_overrides', None)
host_collections = entity_dict.pop('host_collections', None)
changed, activation_key = module.ensure_entity('activation_keys', entity_dict, entity, params=scope)
# only update subscriptions of newly created or updated AKs
# copied keys inherit the subscriptions of the origin, so one would not have to specify them again
# deleted keys don't need subscriptions anymore either
if module.state == 'present' or (module.state == 'present_with_defaults' and changed):
# the auto_attach, release_version and service_level parameters can only be set on an existing AK with an update,
# not during create, so let's force an update. see https://projects.theforeman.org/issues/27632 for details
if any(key in entity_dict for key in ['auto_attach', 'release_version', 'service_level']) and changed:
_activation_key_changed, activation_key = module.ensure_entity('activation_keys', entity_dict, activation_key, params=scope)
ak_scope = {'activation_key_id': activation_key['id']}
if subscriptions is not None:
desired_subscriptions = []
for subscription in subscriptions:
if subscription['name'] is not None and subscription['pool_id'] is None:
desired_subscriptions.append(module.find_resource_by_name('subscriptions', subscription['name'], params=scope, thin=True))
if subscription['pool_id'] is not None:
desired_subscriptions.append(module.find_resource_by_id('subscriptions', subscription['pool_id'], params=scope, thin=True))
desired_subscription_ids = set(item['id'] for item in desired_subscriptions)
current_subscriptions = module.list_resource('subscriptions', params=ak_scope)
current_subscription_ids = set(item['id'] for item in current_subscriptions)
if desired_subscription_ids != current_subscription_ids:
ids_to_remove = current_subscription_ids - desired_subscription_ids
if ids_to_remove:
payload = {
'id': activation_key['id'],
'subscriptions': [{'id': item} for item in ids_to_remove],
}
payload.update(scope)
module.resource_action('activation_keys', 'remove_subscriptions', payload)
ids_to_add = desired_subscription_ids - current_subscription_ids
if ids_to_add:
payload = {
'id': activation_key['id'],
'subscriptions': [{'id': item, 'quantity': 1} for item in ids_to_add],
}
payload.update(scope)
module.resource_action('activation_keys', 'add_subscriptions', payload)
changed = True
if content_overrides is not None:
_product_content_changed, product_content = module.resource_action('activation_keys', 'product_content', params={'id': activation_key['id']})
current_content_overrides = {
product['content']['label']: product['enabled_content_override']
for product in product_content['results']
if product['enabled_content_override'] is not None
}
desired_content_overrides = {
product['label']: override_to_boolnone(product['override']) for product in content_overrides
}
changed_content_overrides = []
for label, override in desired_content_overrides.items():
if override != current_content_overrides.pop(label, None):
changed_content_overrides.append({'content_label': label, 'value': override})
for label in current_content_overrides.keys():
changed_content_overrides.append({'content_label': label, 'reset': True})
if changed_content_overrides:
payload = {
'id': activation_key['id'],
'content_overrides': changed_content_overrides,
}
module.resource_action('activation_keys', 'content_override', payload)
changed = True
if host_collections is not None:
if 'host_collection_ids' in activation_key:
current_host_collection_ids = set(activation_key['host_collection_ids'])
else:
current_host_collection_ids = set(item['id'] for item in activation_key['host_collections'])
desired_host_collections = module.find_resources_by_name('host_collections', host_collections, params=scope, thin=True)
desired_host_collection_ids = set(item['id'] for item in desired_host_collections)
if desired_host_collection_ids != current_host_collection_ids:
ids_to_remove = current_host_collection_ids - desired_host_collection_ids
if ids_to_remove:
payload = {
'id': activation_key['id'],
'host_collection_ids': list(ids_to_remove),
}
module.resource_action('activation_keys', 'remove_host_collections', payload)
ids_to_add = desired_host_collection_ids - current_host_collection_ids
if ids_to_add:
payload = {
'id': activation_key['id'],
'host_collection_ids': list(ids_to_add),
}
module.resource_action('activation_keys', 'add_host_collections', payload)
changed = True
module.exit_json(changed=changed)
|
def main():
module = KatelloEntityAnsibleModule(
entity_spec=dict(
name=dict(required=True),
new_name=dict(),
lifecycle_environment=dict(type='entity', flat_name='environment_id'),
content_view=dict(type='entity', flat_name='content_view_id'),
host_collections=dict(type='entity_list', flat_name='host_collection_ids'),
auto_attach=dict(type='bool'),
release_version=dict(),
service_level=dict(choices=['Self-Support', 'Standard', 'Premium']),
),
argument_spec=dict(
subscriptions=dict(type='list', elements='dict', options=dict(
name=dict(),
pool_id=dict(),
),
required_one_of=[['name', 'pool_id']],
),
content_overrides=dict(type='list', elements='dict', options=dict(
label=dict(required=True),
override=dict(required=True, choices=['enabled', 'disabled']),
)),
state=dict(default='present', choices=['present', 'present_with_defaults', 'absent', 'copied']),
),
required_if=[
['state', 'copied', ['new_name']],
],
)
entity_dict = module.clean_params()
module.connect()
entity_dict['organization'] = module.find_resource_by_name('organizations', entity_dict['organization'], thin=True)
scope = {'organization_id': entity_dict['organization']['id']}
if not module.desired_absent:
if 'lifecycle_environment' in entity_dict:
entity_dict['lifecycle_environment'] = module.find_resource_by_name(
'lifecycle_environments', entity_dict['lifecycle_environment'], params=scope, thin=True)
if 'content_view' in entity_dict:
entity_dict['content_view'] = module.find_resource_by_name('content_views', entity_dict['content_view'], params=scope, thin=True)
entity = module.find_resource_by_name('activation_keys', name=entity_dict['name'], params=scope, failsafe=True)
if module.state == 'copied':
new_entity = module.find_resource_by_name('activation_keys', name=entity_dict['new_name'], params=scope, failsafe=True)
if new_entity is not None:
module.warn("Activation Key '{0}' already exists.".format(entity_dict['new_name']))
module.exit_json(changed=False)
subscriptions = entity_dict.pop('subscriptions', None)
content_overrides = entity_dict.pop('content_overrides', None)
host_collections = entity_dict.pop('host_collections', None)
changed, activation_key = module.ensure_entity('activation_keys', entity_dict, entity, params=scope)
# only update subscriptions of newly created or updated AKs
# copied keys inherit the subscriptions of the origin, so one would not have to specify them again
# deleted keys don't need subscriptions anymore either
if module.state == 'present' or (module.state == 'present_with_defaults' and changed):
# the auto_attach, release_version and service_level parameters can only be set on an existing AK with an update,
# not during create, so let's force an update. see https://projects.theforeman.org/issues/27632 for details
if any(key in entity_dict for key in ['auto_attach', 'release_version', 'service_level']) and changed:
_activation_key_changed, activation_key = module.ensure_entity('activation_keys', entity_dict, activation_key, params=scope)
ak_scope = {'activation_key_id': activation_key['id']}
if subscriptions is not None:
desired_subscriptions = []
for subscription in subscriptions:
if subscription['name'] is not None and subscription['pool_id'] is None:
desired_subscriptions.append(module.find_resource_by_name('subscriptions', subscription['name'], params=scope, thin=True))
if subscription['pool_id'] is not None:
desired_subscriptions.append(module.find_resource_by_id('subscriptions', subscription['pool_id'], params=scope, thin=True))
desired_subscription_ids = set(item['id'] for item in desired_subscriptions)
current_subscriptions = module.list_resource('subscriptions', params=ak_scope)
current_subscription_ids = set(item['id'] for item in current_subscriptions)
if desired_subscription_ids != current_subscription_ids:
ids_to_remove = current_subscription_ids - desired_subscription_ids
if ids_to_remove:
payload = {
'id': activation_key['id'],
'subscriptions': [{'id': item} for item in ids_to_remove],
}
payload.update(scope)
module.resource_action('activation_keys', 'remove_subscriptions', payload)
ids_to_add = desired_subscription_ids - current_subscription_ids
if ids_to_add:
payload = {
'id': activation_key['id'],
'subscriptions': [{'id': item, 'quantity': 1} for item in ids_to_add],
}
payload.update(scope)
module.resource_action('activation_keys', 'add_subscriptions', payload)
changed = True
if content_overrides is not None:
_product_content_changed, product_content = module.resource_action('activation_keys', 'product_content', params={'id': activation_key['id']})
current_content_overrides = {
product['content']['label']: product['enabled_content_override']
for product in product_content['results']
if product['enabled_content_override'] is not None
}
desired_content_overrides = {
product['label']: override_to_boolnone(product['override']) for product in content_overrides
}
changed_content_overrides = []
for label, override in desired_content_overrides.items():
if override != current_content_overrides.pop(label, None):
changed_content_overrides.append({'content_label': label, 'value': override})
for label in current_content_overrides.keys():
changed_content_overrides.append({'content_label': label, 'reset': True})
if changed_content_overrides:
payload = {
'id': activation_key['id'],
'content_overrides': changed_content_overrides,
}
module.resource_action('activation_keys', 'content_override', payload)
changed = True
if host_collections is not None:
if 'host_collection_ids' in activation_key:
current_host_collection_ids = set(activation_key['host_collection_ids'])
else:
current_host_collection_ids = set(item['id'] for item in activation_key['host_collections'])
desired_host_collections = module.find_resources_by_name('host_collections', host_collections, params=scope, thin=True)
desired_host_collection_ids = set(item['id'] for item in desired_host_collections)
if desired_host_collection_ids != current_host_collection_ids:
ids_to_remove = current_host_collection_ids - desired_host_collection_ids
if ids_to_remove:
payload = {
'id': activation_key['id'],
'host_collection_ids': list(ids_to_remove),
}
module.resource_action('activation_keys', 'remove_host_collections', payload)
ids_to_add = desired_host_collection_ids - current_host_collection_ids
if ids_to_add:
payload = {
'id': activation_key['id'],
'host_collection_ids': list(ids_to_add),
}
module.resource_action('activation_keys', 'add_host_collections', payload)
changed = True
module.exit_json(changed=changed)
|
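The subscription and host-collection handling above boils down to reconciling two id sets: whatever is current but not desired gets removed, whatever is desired but not current gets added. In isolation, with made-up ids:
current_ids = {1, 2, 3}    # standing in for the API results above
desired_ids = {2, 3, 4}
ids_to_remove = current_ids - desired_ids
ids_to_add = desired_ids - current_ids
assert ids_to_remove == {1} and ids_to_add == {4}
changed = bool(ids_to_remove or ids_to_add)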
14,254 |
def get_sim_steps(
time: Union[Real, Decimal],
units: str = "step",
round_mode: str = "error"
) -> int:
"""Calculates the number of simulation time steps for a given amount of *time*.
Args:
time: The value to convert to simulation time steps.
units: String specifying the units of the result
(one of ``'step'``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
``'step'`` means time is already in simulation time steps.
round_mode: String specifying how to handle time values that sit between time steps
(one of ``'error'``, ``'round'``, ``'ceil'``, ``'floor'``).
Returns:
The number of simulation time steps.
When *round_mode* is ``"error"``, a :exc:`ValueError` is thrown if the value cannot
be accurately represented in terms of simulator time steps.
When *round_mode* is ``"round"``, ``"ceil"``, or ``"floor"``, the corresponding
rounding function from the standard library will be used to round to a simulator
time step.
.. versionchanged:: 1.5
        Support ``'step'`` as the *units* argument to mean "simulator time step".
.. versionchanged:: 1.6
Support rounding modes.
"""
if units not in (None, "step"):
result = _ldexp10(time, _get_log_time_scale(units) - _get_simulator_precision())
else:
result = time
if units is None:
warnings.warn(
'Using units=None is deprecated, use units="step" instead.',
DeprecationWarning, stacklevel=2)
units="step" # don't propagate deprecated value
if round_mode == "error":
result_rounded = math.floor(result)
if result_rounded != result:
precision = _get_simulator_precision()
raise ValueError(
f"Unable to accurately represent {time}({units}) with the simulator precision of 1e{precision}"
)
elif round_mode == "ceil":
result_rounded = math.ceil(result)
elif round_mode == "round":
result_rounded = round(result)
elif round_mode == "floor":
result_rounded = math.floor(result)
else:
raise ValueError(f"invalid round_mode specifier: {round_mode}")
return result_rounded
|
def get_sim_steps(
time: Union[Real, Decimal],
units: str = "step",
round_mode: str = "error"
) -> int:
"""Calculates the number of simulation time steps for a given amount of *time*.
Args:
time: The value to convert to simulation time steps.
units: String specifying the units of the result
(one of ``'step'``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
``'step'`` means *time* is already in simulation time steps.
round_mode: String specifying how to handle time values that sit between time steps
(one of ``'error'``, ``'round'``, ``'ceil'``, ``'floor'``).
Returns:
The number of simulation time steps.
When *round_mode* is ``"error"``, a :exc:`ValueError` is thrown if the value cannot
be accurately represented in terms of simulator time steps.
When *round_mode* is ``"round"``, ``"ceil"``, or ``"floor"``, the corresponding
rounding function from the standard library will be used to round to a simulator
time step.
.. versionchanged:: 1.5
        Support ``'step'`` as the *units* argument to mean "simulator time step".
.. versionchanged:: 1.6
Support rounding modes.
"""
if units not in (None, "step"):
result = _ldexp10(time, _get_log_time_scale(units) - _get_simulator_precision())
else:
result = time
if units is None:
warnings.warn(
'Using units=None is deprecated, use units="step" instead.',
DeprecationWarning, stacklevel=2)
units="step" # don't propagate deprecated value
if round_mode == "error":
result_rounded = math.floor(result)
if result_rounded != result:
precision = _get_simulator_precision()
raise ValueError(
f"Unable to accurately represent {time}({units}) with the simulator precision of 1e{precision}"
)
elif round_mode == "ceil":
result_rounded = math.ceil(result)
elif round_mode == "round":
result_rounded = round(result)
elif round_mode == "floor":
result_rounded = math.floor(result)
else:
raise ValueError(f"invalid round_mode specifier: {round_mode}")
return result_rounded
|
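The round_mode dispatch above can be exercised without a simulator; a small sketch with a hypothetical helper that mirrors the error/ceil/floor/round branches (values chosen so the behaviour is unambiguous):
import math

def round_step(result, round_mode):
    # hypothetical helper mirroring the branches above; invalid modes raise KeyError here
    if round_mode == "error":
        steps = math.floor(result)
        if steps != result:
            raise ValueError(f"{result} is not a whole number of steps")
        return steps
    return {"round": round, "ceil": math.ceil, "floor": math.floor}[round_mode](result)

assert round_step(2500.0, "error") == 2500
assert round_step(2000.5, "ceil") == 2001
assert round_step(2000.5, "floor") == 2000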
40,480 |
def dropout_node(edge_index: Tensor, edge_attr: OptTensor = None,
p: float = 0.5, num_nodes: Optional[int] = None,
training: bool = True) -> Tuple[Tensor, OptTensor]:
r"""Randomly drops nodes from the adjacency matrix
:obj:`(edge_index, edge_attr)` with probability :obj:`p` using samples from
a Bernoulli distribution.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
p (float, optional): Dropout probability. (default: :obj:`0.5`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
Examples:
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6])
>>> dropout_node(edge_index, edge_attr)
(tensor([[2, 3],
[3, 2]]),
tensor([5, 6]))
"""
if p < 0. or p > 1.:
        raise ValueError(f'Dropout probability has to be between 0 and 1 '
                         f'(got {p})')
if not training or p == 0.0:
return edge_index, edge_attr
num_nodes = maybe_num_nodes(edge_index, num_nodes)
nodes = torch.arange(num_nodes, dtype=torch.long, device=edge_index.device)
mask = torch.full_like(nodes, 1 - p, dtype=torch.float32)
mask = torch.bernoulli(mask).to(torch.bool)
subset = nodes[mask]
return subgraph(subset, edge_index, edge_attr, num_nodes=num_nodes)
|
def dropout_node(edge_index: Tensor,
p: float = 0.5, num_nodes: Optional[int] = None,
training: bool = True) -> Tuple[Tensor, OptTensor]:
r"""Randomly drops nodes from the adjacency matrix
:obj:`(edge_index, edge_attr)` with probability :obj:`p` using samples from
a Bernoulli distribution.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
p (float, optional): Dropout probability. (default: :obj:`0.5`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
Examples:
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6])
>>> dropout_node(edge_index, edge_attr)
(tensor([[2, 3],
[3, 2]]),
tensor([5, 6]))
"""
if p < 0. or p > 1.:
        raise ValueError(f'Dropout probability has to be between 0 and 1 '
                         f'(got {p})')
if not training or p == 0.0:
return edge_index, edge_attr
num_nodes = maybe_num_nodes(edge_index, num_nodes)
nodes = torch.arange(num_nodes, dtype=torch.long, device=edge_index.device)
mask = torch.full_like(nodes, 1 - p, dtype=torch.float32)
mask = torch.bernoulli(mask).to(torch.bool)
subset = nodes[mask]
return subgraph(subset, edge_index, edge_attr, num_nodes=num_nodes)
|
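The dropout idea above, reduced to plain PyTorch (assuming torch is installed; no torch_geometric): sample a Bernoulli keep-mask over the nodes, then keep only edges whose two endpoints both survive.
import torch

torch.manual_seed(0)   # for a reproducible mask
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])
p = 0.5
num_nodes = int(edge_index.max()) + 1
node_mask = torch.bernoulli(torch.full((num_nodes,), 1 - p)).to(torch.bool)
edge_mask = node_mask[edge_index[0]] & node_mask[edge_index[1]]
print(node_mask, edge_index[:, edge_mask])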
1,247 |
def proc_file(f, opts):
verbose(1, f"Loading {f}")
row = [f"@l{f}"]
try:
vol = nib.load(f)
h = vol.header
except Exception as e:
row += ['failed']
verbose(2, f"Failed to gather information -- {str(e)}")
return row
row += [str(safe_get(h, 'data_dtype')),
f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]",
f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"]
# Slope
if hasattr(h, 'has_data_slope') and \
(h.has_data_slope or h.has_data_intercept) and \
not h.get_slope_inter() in [(1.0, 0.0), (None, None)]:
row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
else:
row += ['']
if hasattr(h, 'extensions') and len(h.extensions):
row += ['@l#exts: %d' % len(h.extensions)]
else:
row += ['']
if opts.header_fields:
# signals "all fields"
if opts.header_fields == 'all':
# TODO: might vary across file types, thus prior sensing
# would be needed
header_fields = h.keys()
else:
header_fields = opts.header_fields.split(',')
for f in header_fields:
if not f: # skip empty
continue
try:
row += [str(h[f])]
except (KeyError, ValueError):
row += [_err()]
try:
if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and
(h.get_qform() != h.get_sform()).any()):
row += ['sform']
else:
row += ['']
except Exception as e:
verbose(2, f"Failed to obtain qform or sform -- {str(e)}")
if isinstance(h, nib.AnalyzeHeader):
row += ['']
else:
row += [_err()]
if opts.stats or opts.counts:
# We are doomed to load data
try:
d = np.asarray(vol.dataobj)
if not opts.stats_zeros:
d = d[np.nonzero(d)]
else:
# at least flatten it -- functionality below doesn't
# depend on the original shape, so let's use a flat view
d = d.reshape(-1)
if opts.stats:
# just # of elements
row += ["@l[%d]" % np.prod(d.shape)]
# stats
row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-']
if opts.counts:
items, inv = np.unique(d, return_inverse=True)
if len(items) > 1000 and not opts.all_counts:
counts = _err("%d uniques. Use --all-counts" % len(items))
else:
freq = np.bincount(inv)
counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq))
row += ["@l" + counts]
except IOError as e:
verbose(2, f"Failed to obtain stats/counts -- {str(e)}")
row += [_err()]
return row
|
def proc_file(f, opts):
verbose(1, f"Loading {f}")
row = [f"@l{f}"]
try:
vol = nib.load(f)
h = vol.header
except Exception as e:
row += ['failed']
verbose(2, f"Failed to gather information -- {str(e)}")
return row
row += [str(safe_get(h, 'data_dtype')),
f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]",
f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"]
# Slope
if hasattr(h, 'has_data_slope') and \
(h.has_data_slope or h.has_data_intercept) and \
not h.get_slope_inter() in [(1.0, 0.0), (None, None)]:
row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
else:
row += ['']
if hasattr(h, 'extensions') and len(h.extensions):
row += ['@l#exts: %d' % len(h.extensions)]
else:
row += ['']
if opts.header_fields:
# signals "all fields"
if opts.header_fields == 'all':
# TODO: might vary across file types, thus prior sensing
# would be needed
header_fields = h.keys()
else:
header_fields = opts.header_fields.split(',')
for f in header_fields:
if not f: # skip empty
continue
try:
row += [str(h[f])]
except (KeyError, ValueError):
row += [_err()]
try:
if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and
(h.get_qform() != h.get_sform()).any()):
row += ['sform']
else:
row += ['']
except Exception as e:
verbose(2, f"Failed to obtain qform or sform -- {e}")
if isinstance(h, nib.AnalyzeHeader):
row += ['']
else:
row += [_err()]
if opts.stats or opts.counts:
# We are doomed to load data
try:
d = np.asarray(vol.dataobj)
if not opts.stats_zeros:
d = d[np.nonzero(d)]
else:
# at least flatten it -- functionality below doesn't
# depend on the original shape, so let's use a flat view
d = d.reshape(-1)
if opts.stats:
# just # of elements
row += ["@l[%d]" % np.prod(d.shape)]
# stats
row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-']
if opts.counts:
items, inv = np.unique(d, return_inverse=True)
if len(items) > 1000 and not opts.all_counts:
counts = _err("%d uniques. Use --all-counts" % len(items))
else:
freq = np.bincount(inv)
counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq))
row += ["@l" + counts]
except IOError as e:
verbose(2, f"Failed to obtain stats/counts -- {str(e)}")
row += [_err()]
return row
|
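The counts branch above uses a compact numpy idiom: np.unique with return_inverse plus np.bincount gives per-value frequencies in one pass. Isolated on toy data:
import numpy as np

d = np.array([0, 2, 2, 3, 3, 3])
items, inv = np.unique(d, return_inverse=True)
freq = np.bincount(inv)                      # counts aligned with `items`
assert list(zip(items.tolist(), freq.tolist())) == [(0, 1), (2, 2), (3, 3)]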
17,816 |
def run(plotIt=True, saveFig=False, cleanup=True):
"""
Run 1D inversions for a single sounding of the RESOLVE and SkyTEM
bookpurnong data
:param bool plotIt: show the plots?
:param bool saveFig: save the figure
:param bool cleanup: remove the downloaded results
"""
downloads, directory = download_and_unzip_data()
resolve = h5py.File(
os.path.sep.join([directory, "booky_resolve.hdf5"]),
"r"
)
skytem = h5py.File(
os.path.sep.join([directory, "booky_skytem.hdf5"]),
"r"
)
river_path = resolve["river_path"].value
# Choose a sounding location to invert
xloc, yloc = 462100.0, 6196500.0
rxind_skytem = np.argmin(
abs(skytem["xy"][:, 0]-xloc)+abs(skytem["xy"][:, 1]-yloc)
)
rxind_resolve = np.argmin(
abs(resolve["xy"][:, 0]-xloc)+abs(resolve["xy"][:, 1]-yloc)
)
# Plot both resolve and skytem data on 2D plane
fig = plt.figure(figsize=(13, 6))
title = ["RESOLVE In-phase 400 Hz", "SkyTEM High moment 156 $\mu$s"]
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
axs = [ax1, ax2]
out_re = utils.plot2Ddata(
resolve["xy"], resolve["data"][:, 0], ncontour=100,
contourOpts={"cmap": "viridis"}, ax=ax1
)
vmin, vmax = out_re[0].get_clim()
cb_re = plt.colorbar(
out_re[0], ticks=np.linspace(vmin, vmax, 3), ax=ax1,
fraction=0.046, pad=0.04
)
temp_skytem = skytem["data"][:, 5].copy()
temp_skytem[skytem["data"][:, 5] > 7e-10] = 7e-10
out_sky = utils.plot2Ddata(
skytem["xy"][:, :2], temp_skytem, ncontour=100,
contourOpts={"cmap": "viridis", "vmax": 7e-10}, ax=ax2
)
vmin, vmax = out_sky[0].get_clim()
cb_sky = plt.colorbar(
out_sky[0], ticks=np.linspace(vmin, vmax*0.99, 3), ax=ax2,
format="%.1e", fraction=0.046, pad=0.04
)
cb_re.set_label("Bz (ppm)")
cb_sky.set_label("dB$_z$ / dt (V/A-m$^4$)")
for i, ax in enumerate(axs):
xticks = [460000, 463000]
yticks = [6195000, 6198000, 6201000]
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.plot(xloc, yloc, 'wo')
ax.plot(river_path[:, 0], river_path[:, 1], 'k', lw=0.5)
ax.set_aspect("equal")
if i == 1:
ax.plot(
skytem["xy"][:, 0], skytem["xy"][:, 1], 'k.',
alpha=0.02, ms=1
)
ax.set_yticklabels([str(" ") for f in yticks])
else:
ax.plot(
resolve["xy"][:, 0], resolve["xy"][:, 1], 'k.', alpha=0.02,
ms=1
)
ax.set_yticklabels([str(f) for f in yticks])
ax.set_ylabel("Northing (m)")
ax.set_xlabel("Easting (m)")
ax.set_title(title[i])
ax.axis('equal')
# plt.tight_layout()
if saveFig is True:
fig.savefig("resolve_skytem_data.png", dpi=600)
# ------------------ Mesh ------------------ #
# Step1: Set 2D cylindrical mesh
cs, ncx, ncz, npad = 1., 10., 10., 20
hx = [(cs, ncx), (cs, npad, 1.3)]
npad = 12
temp = np.logspace(np.log10(1.), np.log10(12.), 19)
temp_pad = temp[-1] * 1.3 ** np.arange(npad)
hz = np.r_[temp_pad[::-1], temp[::-1], temp, temp_pad]
mesh = discretize.CylMesh([hx, 1, hz], '00C')
active = mesh.vectorCCz < 0.
# Step2: Set a SurjectVertical1D mapping
# Note: this sets our inversion model as 1D log conductivity
# below subsurface
active = mesh.vectorCCz < 0.
actMap = maps.InjectActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz)
mapping = maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * actMap
sig_half = 1e-1
sig_air = 1e-8
sigma = np.ones(mesh.nCz)*sig_air
sigma[active] = sig_half
# Initial and reference model
m0 = np.log(sigma[active])
# ------------------ RESOLVE Forward Simulation ------------------ #
# Step3: Invert Resolve data
# Bird height from the surface
b_height_resolve = resolve["src_elevation"].value
src_height_resolve = b_height_resolve[rxind_resolve]
# Set Rx (In-phase and Quadrature)
rxOffset = 7.86
bzr = FDEM.Rx.PointMagneticFluxDensitySecondary(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='real'
)
bzi = FDEM.Rx.PointMagneticFluxDensity(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='imag'
)
# Set Source (In-phase and Quadrature)
frequency_cp = resolve["frequency_cp"].value
freqs = frequency_cp.copy()
srcLoc = np.array([0., 0., src_height_resolve])
srcList = [FDEM.Src.MagDipole([bzr, bzi], freq, srcLoc, orientation='Z')
for freq in freqs]
# Set FDEM survey (In-phase and Quadrature)
survey = FDEM.Survey(srcList)
prb = FDEM.Simulation3DMagneticFluxDensity(
mesh, sigmaMap=mapping, Solver=Solver
)
prb.survey = survey
# ------------------ RESOLVE Inversion ------------------ #
# Primary field
bp = - mu_0/(4*np.pi*rxOffset**3)
# Observed data
cpi_inds = [0, 2, 6, 8, 10]
cpq_inds = [1, 3, 7, 9, 11]
dobs_re = np.c_[
resolve["data"][rxind_resolve, :][cpi_inds],
resolve["data"][rxind_resolve, :][cpq_inds]
].flatten() * bp * 1e-6
# Uncertainty
relative = np.repeat(np.r_[np.ones(3)*0.1, np.ones(2)*0.15], 2)
floor = 20 * abs(bp) * 1e-6
uncert = abs(dobs_re) * relative + floor
# Data Misfit
data_resolve = data.Data(dobs=dobs_re, survey=survey, standard_deviation=uncert)
dmisfit = data_misfit.L2DataMisfit(simulation=prb, data=data_resolve)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Inversion directives and parameters
target = directives.TargetMisfit() # stop when we hit target misfit
invProb.beta = 2.
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-3
reg.alpha_x = 1.
reg.mref = m0.copy()
opt.LSshorten = 0.5
opt.remember('xc')
# run the inversion
mopt_re = inv.run(m0)
dpred_re = invProb.dpred
# ------------------ SkyTEM Forward Simulation ------------------ #
# Step4: Invert SkyTEM data
# Bird height from the surface
b_height_skytem = skytem["src_elevation"].value
src_height = b_height_skytem[rxind_skytem]
srcLoc = np.array([0., 0., src_height])
# Radius of the source loop
area = skytem["area"].value
radius = np.sqrt(area/np.pi)
rxLoc = np.array([[radius, 0., src_height]])
# Parameters for current waveform
t0 = skytem["t0"].value
times = skytem["times"].value
waveform_skytem = skytem["waveform"].value
offTime = t0
times_off = times - t0
    # Note: we are using a theoretical VTEM waveform, but it effectively
    # fits the SkyTEM waveform
peakTime = 1.0000000e-02
a = 3.
dbdt_z = TDEM.Rx.PointMagneticFluxTimeDerivative(
locations=rxLoc, times=times_off[:-3]+offTime, orientation='z'
) # vertical db_dt
rxList = [dbdt_z] # list of receivers
srcList = [
TDEM.Src.CircularLoop(
rxList, loc=srcLoc, radius=radius,
orientation='z',
waveform=TDEM.Src.VTEMWaveform(
offTime=offTime, peakTime=peakTime, a=3.
)
)
]
# solve the problem at these times
timeSteps = [
(peakTime/5, 5), ((offTime-peakTime)/5, 5),
(1e-5, 5), (5e-5, 5), (1e-4, 10), (5e-4, 15)
]
prob = TDEM.Simulation3DElectricField(
mesh, time_steps=timeSteps, sigmaMap=mapping, Solver=Solver
)
survey = TDEM.Survey(srcList)
prob.survey = survey
src = srcList[0]
rx = src.receiver_list[0]
wave = []
for time in prob.times:
wave.append(src.waveform.eval(time))
wave = np.hstack(wave)
out = prob.dpred(m0)
# plot the waveform
fig = plt.figure(figsize=(5, 3))
times_off = times-t0
plt.plot(waveform_skytem[:, 0], waveform_skytem[:, 1], 'k.')
plt.plot(prob.times, wave, 'k-', lw=2)
plt.legend(("SkyTEM waveform", "Waveform (fit)"), fontsize=10)
for t in rx.times:
plt.plot(np.ones(2)*t, np.r_[-0.03, 0.03], 'k-')
plt.ylim(-0.1, 1.1)
plt.grid(True)
plt.xlabel("Time (s)")
plt.ylabel("Normalized current")
if saveFig:
fig.savefig("skytem_waveform", dpi=200)
# Observed data
dobs_sky = skytem["data"][rxind_skytem, :-3] * area
# ------------------ SkyTEM Inversion ------------------ #
# Uncertainty
relative = 0.12
floor = 7.5e-12
uncert = abs(dobs_sky) * relative + floor
# Data Misfit
data_sky = data.Data(dobs=-dobs_sky, survey=survey, standard_deviation=uncert)
dmisfit = data_misfit.L2DataMisfit(simulation=prob, data=data_sky)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Directives and Inversion Parameters
target = directives.TargetMisfit()
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
invProb.beta = 20.
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-1
reg.alpha_x = 1.
opt.LSshorten = 0.5
opt.remember('xc')
reg.mref = mopt_re # Use RESOLVE model as a reference model
# run the inversion
mopt_sky = inv.run(m0)
dpred_sky = invProb.dpred
# Plot the figure from the paper
plt.figure(figsize=(12, 8))
fs = 13 # fontsize
matplotlib.rcParams['font.size'] = fs
ax0 = plt.subplot2grid((2, 2), (0, 0), rowspan=2)
ax1 = plt.subplot2grid((2, 2), (0, 1))
ax2 = plt.subplot2grid((2, 2), (1, 1))
# Recovered Models
sigma_re = np.repeat(np.exp(mopt_re), 2, axis=0)
sigma_sky = np.repeat(np.exp(mopt_sky), 2, axis=0)
z = np.repeat(mesh.vectorCCz[active][1:], 2, axis=0)
z = np.r_[mesh.vectorCCz[active][0], z, mesh.vectorCCz[active][-1]]
ax0.semilogx(sigma_re, z, 'k', lw=2, label="RESOLVE")
ax0.semilogx(sigma_sky, z, 'b', lw=2, label="SkyTEM")
ax0.set_ylim(-50, 0)
# ax0.set_xlim(5e-4, 1e2)
ax0.grid(True)
ax0.set_ylabel("Depth (m)")
ax0.set_xlabel("Conducivity (S/m)")
ax0.legend(loc=3)
ax0.set_title("(a) Recovered Models")
# RESOLVE Data
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 0]/bp*1e6, 'k-',
label="Obs (real)"
)
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 1]/bp*1e6, 'k--',
label="Obs (imag)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 0]/bp*1e6, 'k+', ms=10,
markeredgewidth=2., label="Pred (real)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 1]/bp*1e6, 'ko', ms=6,
markeredgecolor='k', markeredgewidth=0.5, label="Pred (imag)"
)
ax1.set_title("(b) RESOLVE")
ax1.set_xlabel("Frequency (Hz)")
ax1.set_ylabel("Bz (ppm)")
ax1.grid(True)
ax1.legend(loc=3, fontsize=11)
# SkyTEM data
ax2.loglog(times_off[3:]*1e6, dobs_sky/area, 'b-', label="Obs")
ax2.loglog(
times_off[3:]*1e6, -dpred_sky/area, 'bo', ms=4,
markeredgecolor='k', markeredgewidth=0.5, label="Pred"
)
ax2.set_xlim(times_off.min()*1e6*1.2, times_off.max()*1e6*1.1)
ax2.set_xlabel("Time ($\mu s$)")
ax2.set_ylabel("dBz / dt (V/A-m$^4$)")
ax2.set_title("(c) SkyTEM High-moment")
ax2.grid(True)
ax2.legend(loc=3)
a3 = plt.axes([0.86, .33, .1, .09], facecolor=[0.8, 0.8, 0.8, 0.6])
a3.plot(prob.times*1e6, wave, 'k-')
a3.plot(
rx.times*1e6, np.zeros_like(rx.times), 'k|', markeredgewidth=1,
markersize=12
)
a3.set_xlim([prob.times.min()*1e6*0.75, prob.times.max()*1e6*1.1])
a3.set_title('(d) Waveform', fontsize=11)
a3.set_xticks([prob.times.min()*1e6, t0*1e6, prob.times.max()*1e6])
a3.set_yticks([])
# a3.set_xticklabels(['0', '2e4'])
a3.set_xticklabels(['-1e4', '0', '1e4'])
plt.tight_layout()
if saveFig:
plt.savefig("booky1D_time_freq.png", dpi=600)
if plotIt:
plt.show()
resolve.close()
skytem.close()
if cleanup:
print( os.path.split(directory)[:-1])
os.remove(
os.path.sep.join(
directory.split()[:-1] + ["._bookpurnong_inversion"]
)
)
os.remove(downloads)
shutil.rmtree(directory)
|
def run(plotIt=True, saveFig=False, cleanup=True):
"""
Run 1D inversions for a single sounding of the RESOLVE and SkyTEM
bookpurnong data
:param bool plotIt: show the plots?
:param bool saveFig: save the figure
:param bool cleanup: remove the downloaded results
"""
downloads, directory = download_and_unzip_data()
resolve = h5py.File(
os.path.sep.join([directory, "booky_resolve.hdf5"]),
"r"
)
skytem = h5py.File(
os.path.sep.join([directory, "booky_skytem.hdf5"]),
"r"
)
river_path = resolve["river_path"].value
# Choose a sounding location to invert
xloc, yloc = 462100.0, 6196500.0
rxind_skytem = np.argmin(
abs(skytem["xy"][:, 0]-xloc)+abs(skytem["xy"][:, 1]-yloc)
)
rxind_resolve = np.argmin(
abs(resolve["xy"][:, 0]-xloc)+abs(resolve["xy"][:, 1]-yloc)
)
# Plot both resolve and skytem data on 2D plane
fig = plt.figure(figsize=(13, 6))
title = ["RESOLVE In-phase 400 Hz", "SkyTEM High moment 156 $\mu$s"]
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
axs = [ax1, ax2]
out_re = utils.plot2Ddata(
resolve["xy"], resolve["data"][:, 0], ncontour=100,
contourOpts={"cmap": "viridis"}, ax=ax1
)
vmin, vmax = out_re[0].get_clim()
cb_re = plt.colorbar(
out_re[0], ticks=np.linspace(vmin, vmax, 3), ax=ax1,
fraction=0.046, pad=0.04
)
temp_skytem = skytem["data"][:, 5].copy()
temp_skytem[skytem["data"][:, 5] > 7e-10] = 7e-10
out_sky = utils.plot2Ddata(
skytem["xy"][:, :2], temp_skytem, ncontour=100,
contourOpts={"cmap": "viridis", "vmax": 7e-10}, ax=ax2
)
vmin, vmax = out_sky[0].get_clim()
cb_sky = plt.colorbar(
out_sky[0], ticks=np.linspace(vmin, vmax*0.99, 3), ax=ax2,
format="%.1e", fraction=0.046, pad=0.04
)
cb_re.set_label("Bz (ppm)")
cb_sky.set_label("dB$_z$ / dt (V/A-m$^4$)")
for i, ax in enumerate(axs):
xticks = [460000, 463000]
yticks = [6195000, 6198000, 6201000]
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.plot(xloc, yloc, 'wo')
ax.plot(river_path[:, 0], river_path[:, 1], 'k', lw=0.5)
ax.set_aspect("equal")
if i == 1:
ax.plot(
skytem["xy"][:, 0], skytem["xy"][:, 1], 'k.',
alpha=0.02, ms=1
)
ax.set_yticklabels([str(" ") for f in yticks])
else:
ax.plot(
resolve["xy"][:, 0], resolve["xy"][:, 1], 'k.', alpha=0.02,
ms=1
)
ax.set_yticklabels([str(f) for f in yticks])
ax.set_ylabel("Northing (m)")
ax.set_xlabel("Easting (m)")
ax.set_title(title[i])
ax.axis('equal')
# plt.tight_layout()
if saveFig is True:
fig.savefig("resolve_skytem_data.png", dpi=600)
# ------------------ Mesh ------------------ #
# Step1: Set 2D cylindrical mesh
cs, ncx, ncz, npad = 1., 10., 10., 20
hx = [(cs, ncx), (cs, npad, 1.3)]
npad = 12
temp = np.logspace(np.log10(1.), np.log10(12.), 19)
temp_pad = temp[-1] * 1.3 ** np.arange(npad)
hz = np.r_[temp_pad[::-1], temp[::-1], temp, temp_pad]
mesh = discretize.CylMesh([hx, 1, hz], '00C')
active = mesh.vectorCCz < 0.
# Step2: Set a SurjectVertical1D mapping
# Note: this sets our inversion model as 1D log conductivity
# below subsurface
active = mesh.vectorCCz < 0.
actMap = maps.InjectActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz)
mapping = maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * actMap
sig_half = 1e-1
sig_air = 1e-8
sigma = np.ones(mesh.nCz)*sig_air
sigma[active] = sig_half
# Initial and reference model
m0 = np.log(sigma[active])
# ------------------ RESOLVE Forward Simulation ------------------ #
# Step3: Invert Resolve data
# Bird height from the surface
b_height_resolve = resolve["src_elevation"].value
src_height_resolve = b_height_resolve[rxind_resolve]
# Set Rx (In-phase and Quadrature)
rxOffset = 7.86
bzr = FDEM.Rx.PointMagneticFluxDensitySecondary(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='real'
)
bzi = FDEM.Rx.PointMagneticFluxDensity(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='imag'
)
# Set Source (In-phase and Quadrature)
frequency_cp = resolve["frequency_cp"].value
freqs = frequency_cp.copy()
srcLoc = np.array([0., 0., src_height_resolve])
srcList = [FDEM.Src.MagDipole([bzr, bzi], freq, srcLoc, orientation='Z')
for freq in freqs]
# Set FDEM survey (In-phase and Quadrature)
survey = FDEM.Survey(srcList)
prb = FDEM.Simulation3DMagneticFluxDensity(
mesh, sigmaMap=mapping, Solver=Solver
)
prb.survey = survey
# ------------------ RESOLVE Inversion ------------------ #
# Primary field
bp = - mu_0/(4*np.pi*rxOffset**3)
# Observed data
cpi_inds = [0, 2, 6, 8, 10]
cpq_inds = [1, 3, 7, 9, 11]
dobs_re = np.c_[
resolve["data"][rxind_resolve, :][cpi_inds],
resolve["data"][rxind_resolve, :][cpq_inds]
].flatten() * bp * 1e-6
# Uncertainty
relative = np.repeat(np.r_[np.ones(3)*0.1, np.ones(2)*0.15], 2)
floor = 20 * abs(bp) * 1e-6
uncert = abs(dobs_re) * relative + floor
# Data Misfit
data_resolve = data.Data(dobs=dobs_re, survey=survey, standard_deviation=std)
dmisfit = data_misfit.L2DataMisfit(simulation=prb, data=data_resolve)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Inversion directives and parameters
target = directives.TargetMisfit() # stop when we hit target misfit
invProb.beta = 2.
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-3
reg.alpha_x = 1.
reg.mref = m0.copy()
opt.LSshorten = 0.5
opt.remember('xc')
# run the inversion
mopt_re = inv.run(m0)
dpred_re = invProb.dpred
# ------------------ SkyTEM Forward Simulation ------------------ #
# Step4: Invert SkyTEM data
# Bird height from the surface
b_height_skytem = skytem["src_elevation"].value
src_height = b_height_skytem[rxind_skytem]
srcLoc = np.array([0., 0., src_height])
# Radius of the source loop
area = skytem["area"].value
radius = np.sqrt(area/np.pi)
rxLoc = np.array([[radius, 0., src_height]])
# Parameters for current waveform
t0 = skytem["t0"].value
times = skytem["times"].value
waveform_skytem = skytem["waveform"].value
offTime = t0
times_off = times - t0
    # Note: we are using a theoretical VTEM waveform, but it effectively
    # fits the SkyTEM waveform
peakTime = 1.0000000e-02
a = 3.
dbdt_z = TDEM.Rx.PointMagneticFluxTimeDerivative(
locations=rxLoc, times=times_off[:-3]+offTime, orientation='z'
) # vertical db_dt
rxList = [dbdt_z] # list of receivers
srcList = [
TDEM.Src.CircularLoop(
rxList, loc=srcLoc, radius=radius,
orientation='z',
waveform=TDEM.Src.VTEMWaveform(
offTime=offTime, peakTime=peakTime, a=3.
)
)
]
# solve the problem at these times
timeSteps = [
(peakTime/5, 5), ((offTime-peakTime)/5, 5),
(1e-5, 5), (5e-5, 5), (1e-4, 10), (5e-4, 15)
]
prob = TDEM.Simulation3DElectricField(
mesh, time_steps=timeSteps, sigmaMap=mapping, Solver=Solver
)
survey = TDEM.Survey(srcList)
prob.survey = survey
src = srcList[0]
rx = src.receiver_list[0]
wave = []
for time in prob.times:
wave.append(src.waveform.eval(time))
wave = np.hstack(wave)
out = prob.dpred(m0)
# plot the waveform
fig = plt.figure(figsize=(5, 3))
times_off = times-t0
plt.plot(waveform_skytem[:, 0], waveform_skytem[:, 1], 'k.')
plt.plot(prob.times, wave, 'k-', lw=2)
plt.legend(("SkyTEM waveform", "Waveform (fit)"), fontsize=10)
for t in rx.times:
plt.plot(np.ones(2)*t, np.r_[-0.03, 0.03], 'k-')
plt.ylim(-0.1, 1.1)
plt.grid(True)
plt.xlabel("Time (s)")
plt.ylabel("Normalized current")
if saveFig:
fig.savefig("skytem_waveform", dpi=200)
# Observed data
dobs_sky = skytem["data"][rxind_skytem, :-3] * area
# ------------------ SkyTEM Inversion ------------------ #
# Uncertainty
relative = 0.12
floor = 7.5e-12
uncert = abs(dobs_sky) * relative + floor
# Data Misfit
data_sky = data.Data(dobs=-dobs_sky, survey=survey, standard_deviation=uncert)
dmisfit = data_misfit.L2DataMisfit(simulation=prob, data=data_sky)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Directives and Inversion Parameters
target = directives.TargetMisfit()
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
invProb.beta = 20.
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-1
reg.alpha_x = 1.
opt.LSshorten = 0.5
opt.remember('xc')
reg.mref = mopt_re # Use RESOLVE model as a reference model
# run the inversion
mopt_sky = inv.run(m0)
dpred_sky = invProb.dpred
# Plot the figure from the paper
plt.figure(figsize=(12, 8))
fs = 13 # fontsize
matplotlib.rcParams['font.size'] = fs
ax0 = plt.subplot2grid((2, 2), (0, 0), rowspan=2)
ax1 = plt.subplot2grid((2, 2), (0, 1))
ax2 = plt.subplot2grid((2, 2), (1, 1))
# Recovered Models
sigma_re = np.repeat(np.exp(mopt_re), 2, axis=0)
sigma_sky = np.repeat(np.exp(mopt_sky), 2, axis=0)
z = np.repeat(mesh.vectorCCz[active][1:], 2, axis=0)
z = np.r_[mesh.vectorCCz[active][0], z, mesh.vectorCCz[active][-1]]
ax0.semilogx(sigma_re, z, 'k', lw=2, label="RESOLVE")
ax0.semilogx(sigma_sky, z, 'b', lw=2, label="SkyTEM")
ax0.set_ylim(-50, 0)
# ax0.set_xlim(5e-4, 1e2)
ax0.grid(True)
ax0.set_ylabel("Depth (m)")
ax0.set_xlabel("Conducivity (S/m)")
ax0.legend(loc=3)
ax0.set_title("(a) Recovered Models")
# RESOLVE Data
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 0]/bp*1e6, 'k-',
label="Obs (real)"
)
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 1]/bp*1e6, 'k--',
label="Obs (imag)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 0]/bp*1e6, 'k+', ms=10,
markeredgewidth=2., label="Pred (real)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 1]/bp*1e6, 'ko', ms=6,
markeredgecolor='k', markeredgewidth=0.5, label="Pred (imag)"
)
ax1.set_title("(b) RESOLVE")
ax1.set_xlabel("Frequency (Hz)")
ax1.set_ylabel("Bz (ppm)")
ax1.grid(True)
ax1.legend(loc=3, fontsize=11)
# SkyTEM data
ax2.loglog(times_off[3:]*1e6, dobs_sky/area, 'b-', label="Obs")
ax2.loglog(
times_off[3:]*1e6, -dpred_sky/area, 'bo', ms=4,
markeredgecolor='k', markeredgewidth=0.5, label="Pred"
)
ax2.set_xlim(times_off.min()*1e6*1.2, times_off.max()*1e6*1.1)
ax2.set_xlabel("Time ($\mu s$)")
ax2.set_ylabel("dBz / dt (V/A-m$^4$)")
ax2.set_title("(c) SkyTEM High-moment")
ax2.grid(True)
ax2.legend(loc=3)
a3 = plt.axes([0.86, .33, .1, .09], facecolor=[0.8, 0.8, 0.8, 0.6])
a3.plot(prob.times*1e6, wave, 'k-')
a3.plot(
rx.times*1e6, np.zeros_like(rx.times), 'k|', markeredgewidth=1,
markersize=12
)
a3.set_xlim([prob.times.min()*1e6*0.75, prob.times.max()*1e6*1.1])
a3.set_title('(d) Waveform', fontsize=11)
a3.set_xticks([prob.times.min()*1e6, t0*1e6, prob.times.max()*1e6])
a3.set_yticks([])
# a3.set_xticklabels(['0', '2e4'])
a3.set_xticklabels(['-1e4', '0', '1e4'])
plt.tight_layout()
if saveFig:
plt.savefig("booky1D_time_freq.png", dpi=600)
if plotIt:
plt.show()
resolve.close()
skytem.close()
if cleanup:
print( os.path.split(directory)[:-1])
os.remove(
os.path.sep.join(
directory.split()[:-1] + ["._bookpurnong_inversion"]
)
)
os.remove(downloads)
shutil.rmtree(directory)
|
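Both inversions above build their data uncertainties the same way, as a relative error plus an absolute floor; on toy numbers (the values below are illustrative, not taken from the surveys):
import numpy as np

dobs = np.array([1e-9, -5e-10, 2e-11])
relative, floor = 0.12, 7.5e-12
uncert = np.abs(dobs) * relative + floor   # standard deviation assigned to each datum
assert np.all(uncert >= floor)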
9,459 |
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(
net_id=dict(type='str'),
net_name=dict(type='str'),
name=dict(type='str'),
subnet=dict(type='str'),
gateway_ip=dict(type='str'),
state=dict(type='str', choices=['absent', 'query', 'present'], default='present'),
fixed_ip_assignments=dict(type='list'),
reserved_ip_ranges=dict(type='list'),
route_id=dict(type='str'),
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False,
)
meraki = MerakiModule(module, function='static_route')
module.params['follow_redirects'] = 'all'
payload = None
query_urls = {'static_route': '/networks/{net_id}/staticRoutes'}
query_one_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
create_urls = {'static_route': '/networks/{net_id}/staticRoutes/'}
update_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
delete_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
meraki.url_catalog['get_all'].update(query_urls)
meraki.url_catalog['get_one'].update(query_one_urls)
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['delete'] = delete_urls
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id parameters are required')
if not meraki.params['net_name'] and not meraki.params['net_id']:
meraki.fail_json(msg='net_name or net_id parameters are required')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
return meraki.result
# Construct payload
if meraki.params['state'] == 'present':
payload = dict()
if meraki.params['net_name']:
payload['name'] = meraki.params['net_name']
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not org_id:
org_id = meraki.get_org_id(meraki.params['org_name'])
nets = meraki.get_nets(org_id=org_id)
net_id = meraki.params['net_id']
if net_id is None:
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
if meraki.params['route_id'] is not None:
meraki.result['data'] = get_static_route(meraki, net_id, meraki.params['route_id'])
else:
meraki.result['data'] = get_static_routes(meraki, net_id)
elif meraki.params['state'] == 'present':
payload = dict()
payload['name'] = meraki.params['name']
payload['subnet'] = meraki.params['subnet']
payload['gatewayIp'] = meraki.params['gateway_ip']
if meraki.params['fixed_ip_assignments'] is not None:
payload['fixedIpAssignments'] = meraki.params['fixed_ip_assignments']
if meraki.params['reserved_ip_ranges'] is not None:
payload['reserved_ip_ranges'] = meraki.params['reserved_ip_ranges']
if meraki.params['route_id']:
existing_route = get_static_route(meraki, net_id, meraki.params['route_id'])
if meraki.is_update_required(existing_route, payload, optional_ignore=['id']):
path = meraki.construct_path('update', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method="PUT", payload=json.dumps(payload))
meraki.result['changed'] = True
else:
path = meraki.construct_path('create', net_id=net_id)
meraki.result['data'] = meraki.request(path, method="POST", payload=json.dumps(payload))
meraki.result['changed'] = True
elif meraki.params['state'] == 'absent':
path = meraki.construct_path('delete', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method='DELETE')
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
    # simply call AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
|
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(
net_id=dict(type='str'),
net_name=dict(type='str'),
name=dict(type='str'),
subnet=dict(type='str'),
gateway_ip=dict(type='str'),
state=dict(type='str', choices=['absent', 'query', 'present'], default='present'),
fixed_ip_assignments=dict(type='list'),
reserved_ip_ranges=dict(type='list'),
route_id=dict(type='str'),
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False,
)
meraki = MerakiModule(module, function='static_route')
module.params['follow_redirects'] = 'all'
payload = None
query_urls = {'static_route': '/networks/{net_id}/staticRoutes'}
query_one_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
create_urls = {'static_route': '/networks/{net_id}/staticRoutes/'}
update_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
delete_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
meraki.url_catalog['get_all'].update(query_urls)
meraki.url_catalog['get_one'].update(query_one_urls)
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['delete'] = delete_urls
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg="Parameters 'org_name' or 'org_id' parameters are required")
if not meraki.params['net_name'] and not meraki.params['net_id']:
meraki.fail_json(msg='net_name or net_id parameters are required')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
return meraki.result
# Construct payload
if meraki.params['state'] == 'present':
payload = dict()
if meraki.params['net_name']:
payload['name'] = meraki.params['net_name']
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not org_id:
org_id = meraki.get_org_id(meraki.params['org_name'])
nets = meraki.get_nets(org_id=org_id)
net_id = meraki.params['net_id']
if net_id is None:
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
if meraki.params['route_id'] is not None:
meraki.result['data'] = get_static_route(meraki, net_id, meraki.params['route_id'])
else:
meraki.result['data'] = get_static_routes(meraki, net_id)
elif meraki.params['state'] == 'present':
payload = dict()
payload['name'] = meraki.params['name']
payload['subnet'] = meraki.params['subnet']
payload['gatewayIp'] = meraki.params['gateway_ip']
if meraki.params['fixed_ip_assignments'] is not None:
payload['fixedIpAssignments'] = meraki.params['fixed_ip_assignments']
if meraki.params['reserved_ip_ranges'] is not None:
payload['reserved_ip_ranges'] = meraki.params['reserved_ip_ranges']
if meraki.params['route_id']:
existing_route = get_static_route(meraki, net_id, meraki.params['route_id'])
if meraki.is_update_required(existing_route, payload, optional_ignore=['id']):
path = meraki.construct_path('update', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method="PUT", payload=json.dumps(payload))
meraki.result['changed'] = True
else:
path = meraki.construct_path('create', net_id=net_id)
meraki.result['data'] = meraki.request(path, method="POST", payload=json.dumps(payload))
meraki.result['changed'] = True
elif meraki.params['state'] == 'absent':
path = meraki.construct_path('delete', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method='DELETE')
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
    # simply call AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
|
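The visible difference between the two sides above is the wording of the org_name/org_id error message; both hand-roll the same "at least one of" and "mutually exclusive" checks before building the payload (and both send `reserved_ip_ranges` as a snake_case payload key next to camelCase keys such as `gatewayIp` and `fixedIpAssignments`, which may be worth double-checking against the Meraki API). A framework-free sketch of that validation pattern, with a plain dict and a `fail` callback standing in for MerakiModule:

def validate_params(params, fail):
    # at least one of org_name / org_id must be supplied
    if not params.get('org_name') and not params.get('org_id'):
        fail('org_name or org_id parameters are required')
    # at least one of net_name / net_id, but not both
    if not params.get('net_name') and not params.get('net_id'):
        fail('net_name or net_id parameters are required')
    if params.get('net_name') and params.get('net_id'):
        fail('net_name and net_id are mutually exclusive')

errors = []
validate_params({'org_id': '123', 'net_name': 'lab', 'net_id': 'N_1'}, errors.append)
print(errors)  # ['net_name and net_id are mutually exclusive']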
11,962 |
def format_satoshis(
x: Union[int, float, Decimal, str, None], # amount in satoshis
*,
num_zeros: int = 0,
decimal_point: int = 8, # how much to shift decimal point to left (default: sat->BTC)
precision: int = 0, # extra digits after satoshi precision
is_diff: bool = False, # if True, enforce a leading sign (+/-)
whitespaces: bool = False, # if True, add whitespaces, to align numbers in a column
add_thousands_sep: bool = False, # if True, add whitespaces, for better readability of the numbers
) -> str:
if x is None:
return 'unknown'
if x == '!':
return 'max'
assert isinstance(x, (int, float, Decimal)), f"{x!r} should be a number"
# lose redundant precision
x = Decimal(x).quantize(Decimal(10) ** (-precision))
# format string
overall_precision = decimal_point + precision # max digits after final decimal point
decimal_format = "." + str(overall_precision) if overall_precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
# initial result
scale_factor = pow(10, decimal_point)
result = ("{:" + decimal_format + "f}").format(x / scale_factor)
if "." not in result: result += "."
result = result.rstrip('0')
# add extra decimal places (zeros)
integer_part, fract_part = result.split(".")
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + DECIMAL_POINT + fract_part
# add whitespaces as thousands' separator for better readability of numbers
if add_thousands_sep:
integer_part = "{:,}".format(int(integer_part)).replace(',', ' ')
if len(fract_part) > 3:
fractional_part = fract_part
fract_part = ''
for i in range(0,len(fractional_part),3):
fract_part += fractional_part[i:i+3] + ' '
result = integer_part + DECIMAL_POINT + fract_part
# add leading/trailing whitespaces so that numbers can be aligned in a column
if whitespaces:
# add trailing whitespaces
result += " " * (overall_precision - len(fract_part))
# add leading whitespaces
target_len = 15 + precision
result = " " * (target_len - len(result)) + result
return result
|
def format_satoshis(
x: Union[int, float, Decimal, str, None], # amount in satoshis
*,
num_zeros: int = 0,
decimal_point: int = 8, # how much to shift decimal point to left (default: sat->BTC)
precision: int = 0, # extra digits after satoshi precision
is_diff: bool = False, # if True, enforce a leading sign (+/-)
whitespaces: bool = False, # if True, add whitespaces, to align numbers in a column
add_thousands_sep: bool = False, # if True, add whitespaces, for better readability of the numbers
) -> str:
if x is None:
return 'unknown'
if x == '!':
return 'max'
assert isinstance(x, (int, float, Decimal)), f"{x!r} should be a number"
# lose redundant precision
x = Decimal(x).quantize(Decimal(10) ** (-precision))
# format string
overall_precision = decimal_point + precision # max digits after final decimal point
decimal_format = "." + str(overall_precision) if overall_precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
# initial result
scale_factor = pow(10, decimal_point)
result = ("{:" + decimal_format + "f}").format(x / scale_factor)
if "." not in result: result += "."
result = result.rstrip('0')
# add extra decimal places (zeros)
integer_part, fract_part = result.split(".")
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + DECIMAL_POINT + fract_part
# add whitespaces as thousands' separator for better readability of numbers
if add_thousands_sep:
integer_part = "{:,}".format(int(integer_part)).replace(',', ' ')
fract_part = " ".join(fract_part[i:i+3] for i in range(0, len(fract_part), 3))
result = integer_part + DECIMAL_POINT + fract_part
# add leading/trailing whitespaces so that numbers can be aligned in a column
if whitespaces:
# add trailing whitespaces
result += " " * (overall_precision - len(fract_part))
# add leading whitespaces
target_len = 15 + precision
result = " " * (target_len - len(result)) + result
return result
|
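The change in the pair above replaces the manual grouping loop under `add_thousands_sep` with a single join over 3-character slices. A standalone comparison of the two groupings; the visible difference is the trailing space that the loop version leaves after the last group:

def group_loop(fract_part):
    # original approach: manual loop, which appends a space after every group
    out = ''
    for i in range(0, len(fract_part), 3):
        out += fract_part[i:i + 3] + ' '
    return out

def group_join(fract_part):
    # modified approach: join 3-character chunks, no trailing space
    return " ".join(fract_part[i:i + 3] for i in range(0, len(fract_part), 3))

print(repr(group_loop("12345678")))   # '123 456 78 '
print(repr(group_join("12345678")))   # '123 456 78'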
32,300 |
def run_polling_command(client: MsClient, args: dict, cmd: str, action_func: Callable,
results_function: Callable, post_polling_process: Callable):
"""
This function is generically handling the polling flow. In the polling flow, there is always an initial call that
starts the uploading to the API (referred here as the 'upload' function) and another call that retrieves the status
of that upload (referred here as the 'results' function).
The run_polling_command function runs the 'upload' function and returns a ScheduledCommand object that schedules
the next 'results' function, until the polling is complete.
Args:
args: the arguments required to the command being called, under cmd
cmd: the command to schedule by after the current command
upload_function: the function that initiates the uploading to the API
results_function: the function that retrieves the status of the previously initiated upload process
uploaded_item: the type of item being uploaded
Returns:
"""
ScheduledCommand.raise_error_if_not_supported()
interval_in_secs = int(args.get('interval_in_seconds', 10))
timeout_in_seconds = int(args.get('timeout_in_seconds', 600))
# distinguish between the initial run, which is the upload run, and the results run
is_first_run = 'machine_action_id' not in args
if is_first_run:
command_results = action_func(client, args)
outputs = command_results.outputs
# schedule next poll
polling_args = {
'machine_action_id': outputs.get('action_id') or command_results.raw_response.get("id"),
'interval_in_seconds': interval_in_secs,
'polling': True,
**args,
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds)
command_results.scheduled_command = scheduled_command
return command_results
# not a first run
command_result = results_function(client, args)
action_status = command_result.outputs.get("status")
demisto.debug(f"action status is: {action_status}")
if len(command_result.outputs.get("commands", [])) > 0:
command_status = command_result.outputs.get("commands", [{}])[0].get("commandStatus")
else:
command_status = 'Completed' if action_status == "Succeeded" else None
if action_status in ['Failed', 'Cancelled'] or command_status == 'Failed':
error_msg = f"Command {action_status}."
if len(command_result.outputs.get("commands", [])) > 0:
error_msg += f'{command_result.outputs.get("commands", [{}])[0].get("errors")}'
raise Exception(error_msg)
elif command_status != 'Completed' or action_status == 'InProgress':
demisto.debug("action status is not completed")
# schedule next poll
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds
)
command_result = CommandResults(scheduled_command=scheduled_command)
return command_result
# action was completed
else:
return post_polling_process(client, command_result.outputs)
|
def run_polling_command(client: MsClient, args: dict, cmd: str, action_func: Callable,
results_function: Callable, post_polling_process: Callable):
"""
This function is generically handling the polling flow. In the polling flow, there is always an initial call that
starts the uploading to the API (referred here as the 'upload' function) and another call that retrieves the status
of that upload (referred here as the 'results' function).
The run_polling_command function runs the 'upload' function and returns a ScheduledCommand object that schedules
the next 'results' function, until the polling is complete.
Args:
args: the arguments required to the command being called, under cmd
cmd: the command to schedule by after the current command
upload_function: the function that initiates the uploading to the API
results_function: the function that retrieves the status of the previously initiated upload process
uploaded_item: the type of item being uploaded
Returns:
"""
ScheduledCommand.raise_error_if_not_supported()
interval_in_secs = int(args.get('interval_in_seconds', 10))
timeout_in_seconds = int(args.get('timeout_in_seconds', 600))
# distinguish between the initial run, which is the upload run, and the results run
is_first_run = 'machine_action_id' not in args
if is_first_run:
command_results = action_func(client, args)
outputs = command_results.outputs
# schedule next poll
polling_args = {
'machine_action_id': outputs.get('action_id') or command_results.raw_response.get("id"),
'interval_in_seconds': interval_in_secs,
'polling': True,
**args,
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds)
command_results.scheduled_command = scheduled_command
return command_results
# not a first run
command_result = results_function(client, args)
action_status = command_result.outputs.get("status")
demisto.debug(f"action status is: {action_status}")
if len(command_result.outputs.get("commands", [])) > 0:
command_status = command_result.outputs.get("commands", [{}])[0].get("commandStatus")
else:
command_status = 'Completed' if action_status == "Succeeded" else None
if action_status in ['Failed', 'Cancelled'] or command_status == 'Failed':
error_msg = f"Command {action_status}."
if command_result.outputs.get("commands", []):
error_msg += f'{command_result.outputs.get("commands", [{}])[0].get("errors")}'
raise Exception(error_msg)
elif command_status != 'Completed' or action_status == 'InProgress':
demisto.debug("action status is not completed")
# schedule next poll
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds
)
command_result = CommandResults(scheduled_command=scheduled_command)
return command_result
# action was completed
else:
return post_polling_process(client, command_result.outputs)
|
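The docstring above describes a two-phase polling flow: a first run that starts the action and schedules itself, then follow-up runs that re-check the status until the action completes (the actual change in the pair is only the truthiness check on `outputs.get("commands", [])`). A toy, framework-free sketch of that control flow, with plain dicts and a fake status sequence standing in for MsClient, CommandResults and ScheduledCommand:

def run_polling(args, start_action, get_status, on_done):
    # first run: kick off the action and remember its id for the next poll
    if 'machine_action_id' not in args:
        action_id = start_action(args)
        return {'poll_again': True, 'args': {**args, 'machine_action_id': action_id}}
    # follow-up run: check the status of the previously started action
    status = get_status(args['machine_action_id'])
    if status in ('Failed', 'Cancelled'):
        raise RuntimeError(f"Command {status}.")
    if status != 'Succeeded':
        return {'poll_again': True, 'args': args}   # not done yet: schedule another poll
    return on_done(args)                            # done: post-processing

statuses = iter(['InProgress', 'InProgress', 'Succeeded'])
state = {'poll_again': True, 'args': {}}
while state.get('poll_again'):
    state = run_polling(state['args'],
                        start_action=lambda a: 'action-1',
                        get_status=lambda _id: next(statuses),
                        on_done=lambda a: {'poll_again': False, 'result': 'collected output'})
print(state['result'])  # collected output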
14,018 |
def get_geometry_type(gdf):
"""
    Get the basic geometry type of a GeoDataFrame,
    and whether the gdf contains multi-part ("Multi*") geometries."""
geom_types = list(gdf.geometry.geom_type.unique())
geom_collection = False
# Get the basic geometry type
basic_types = []
for gt in geom_types:
if "Multi" in gt:
geom_collection = True
basic_types.append(gt.replace("Multi", ""))
else:
basic_types.append(gt)
geom_types = list(set(basic_types))
# Check for mixed geometry types
assert len(geom_types) < 2, "GeoDataFrame contains mixed geometry types."
geom_type = geom_types[0]
return (geom_type, geom_collection)
|
def _get_geometry_type(gdf):
"""
    Get the basic geometry type of a GeoDataFrame,
    and whether the gdf contains multi-part ("Multi*") geometries."""
geom_types = list(gdf.geometry.geom_type.unique())
geom_collection = False
# Get the basic geometry type
basic_types = []
for gt in geom_types:
if "Multi" in gt:
geom_collection = True
basic_types.append(gt.replace("Multi", ""))
else:
basic_types.append(gt)
geom_types = list(set(basic_types))
# Check for mixed geometry types
assert len(geom_types) < 2, "GeoDataFrame contains mixed geometry types."
geom_type = geom_types[0]
return (geom_type, geom_collection)
|
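The pair above only renames the helper to the private `_get_geometry_type`. The reduction itself — strip the "Multi" prefix, require a single remaining base type, and remember whether any multi-part geometries were seen — can be exercised without a GeoDataFrame by passing the unique `geom_type` strings directly; a rough equivalent:

def basic_geometry_type(geom_types):
    # geom_types: unique geom_type strings, e.g. from gdf.geometry.geom_type.unique()
    geom_collection = any(gt.startswith("Multi") for gt in geom_types)
    basic = {gt.replace("Multi", "") for gt in geom_types}
    assert len(basic) < 2, "mixed geometry types"
    return basic.pop(), geom_collection

print(basic_geometry_type(["Polygon", "MultiPolygon"]))  # ('Polygon', True)
print(basic_geometry_type(["LineString"]))               # ('LineString', False)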
12,485 |
def try_restrict_literal_union(t: UnionType, s: Type) -> Optional[List[Type]]:
new_items = [] # type: List[Type]
for i in t.relevant_items():
it = get_proper_type(i)
if not mypy.typeops.is_simple_literal(it):
return None
if it != s:
new_items.append(i)
return new_items
|
def try_restrict_literal_union(t: UnionType, s: Type) -> Optional[List[Type]]:
new_items: List[Type] = []
for i in t.relevant_items():
it = get_proper_type(i)
if not mypy.typeops.is_simple_literal(it):
return None
if it != s:
new_items.append(i)
return new_items
|
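The only change in the pair above is swapping the `# type:` comment for a PEP 526 variable annotation. A toy analogue of the filtering logic itself, with ints and strings standing in for mypy's simple literal types:

from typing import List, Optional

def try_restrict(items: List[object], s: object) -> Optional[List[object]]:
    new_items: List[object] = []          # PEP 526 annotation, as in the modified side
    for it in items:
        if not isinstance(it, (int, str)):
            return None                   # a non-"literal" member means no restriction is possible
        if it != s:
            new_items.append(it)
    return new_items

print(try_restrict([1, 2, "a"], 2))       # [1, 'a']
print(try_restrict([1, 2.5], 2))          # None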
4,599 |
def fmriprep_confounds_strategy(img_files, denoise_strategy="simple",
**kwargs):
"""
Use preset strategy to load confounds from :term:`fMRIPrep`.
`fmriprep_confounds_strategy` provides an interface to select confounds
based on past literature with limited parameters for user customisation.
.. versionadded:: 0.8.2
Parameters
----------
img_files : path to processed image files, optionally as a list.
Processed nii.gz/dtseries.nii/func.gii file reside in a
        :term:`fMRIPrep` generated functional derivative directory (i.e. the
        associated confound files should be in the same directory as the image
        file). As long as the image file, confound related tsv and json are in
        the same directory with BIDS-compliant names, `fmriprep_confounds` can
retrieve the relevant files correctly.
- `nii.gz` or `dtseries.nii`: path to files, optionally as a list.
- `func.gii`: list of a pair of paths to files, optionally as a list
of lists.
denoise_strategy : {'simple', 'srubbing', 'compcor', 'ica_aroma'}
Name of preset denoising strategies. Each strategy has a set of
associated configurable parameters. For the documentation on
additional parameters, please refer to
:func:`nilearn.input_data.fmriprep_confounds`.
        - 'simple': Load confounds for a simple denoising strategy commonly
used in resting state functional connectivity, described in
:footcite:`Fox2005`. Default as: full motion parameters,
full WM/CSF signals, and high pass filter, with an option to
extract global signal confounds.
Additional parameters: motion, wm_csf, global_signal, demean
        - 'srubbing': Load confounds for scrubbing described in
          :footcite:`Power2012`. Default as: full motion parameters,
full WM/CSF signals, remove segment smaller than 5 continuous
volumes (see docstring of
:func:`nilearn.input_data.fmriprep_confounds`),
high pass filter, with an option to extract global signal confounds.
Additional parameters: motion, wm_csf, scrub, fd_thresh,
std_dvars_thresh, global_signal, demean
- 'compcor': Load confounds using the CompCor strategy from
:footcite:`BEHZADI200790`.Default with full motion parameters,
high pass filter, and anatomical compcor with combined mask.
Additional parameters: motion, n_compcor, compcor, demean
        - 'ica_aroma': Load confounds for non-aggressive ICA-AROMA strategy
described in :footcite:`Pruim2015`. The strategy requires
:term:`fMRIPrep` outputs generated with `--use-aroma` suffixed with
`desc-smoothAROMAnonaggr_bold`. See notes for more details about
this option.
Additional parameters: wm_csf, global_signal, demean
Other keyword arguments:
See additional parameters associated with denoise_strategy and refer
to the documentation of :func:`nilearn.input_data.fmriprep_confounds`
Returns
-------
confounds : pandas.DataFrame, or list of
A reduced version of :term:`fMRIPrep` confounds based on selected
strategy and flags.
An intercept is automatically added to the list of confounds.
The columns contains the labels of the regressors.
sample_mask : None, numpy.ndarray, or list of
        When no volumes require removal, the value is None.
Otherwise, shape: (number of scans - number of volumes removed, )
The index of the niimgs along time/fourth dimension for valid volumes
for subsequent analysis.
This attribute should be passed to parameter `sample_mask` of
:class:`nilearn.input_data.NiftiMasker` or
:func:`nilearn.signal.clean`.
        Volumes are removed if flagged as follows:
- Non-steady-state volumes (if present)
- Motion outliers detected by scrubbing
Notes
-----
ICA-AROMA is implemented in two steps in :footcite:`Pruim2015`:
1. A non-aggressive denoising immediately after ICA classification.
A linear regression estimates signals with all independent
components as predictors. A partial regression is then applied to
remove variance associated with noise independent components.
:term:`fMRIPrep` performs this step and generates files in
`MNI152NLin6Asym` template, suffixed with
`desc-smoothAROMAnonaggr_bold`.
One can produce `desc-smoothAROMAnonaggr_bold` in other spatial
templates, please refer to :term:`fMRIPrep` documentation on ICA-AROMA
`<https://fmriprep.org/en/latest/workflows.html#ica-aroma>`_
2. Confound regression step (mean signals from WM and CSF).
Confound regressors generated by this function with
`denoise_strategy="ica_aroma"`.
    For more discussion on whether regressing the nuisance signals before or
    after ICA-AROMA denoising has a detrimental effect on outcome measures,
    please see notebook 5.
`<https://github.com/nipreps/fmriprep-notebooks/>`_
References
-----------
.. footbibliography::
"""
default_parameters = preset_strategies[denoise_strategy].copy()
check_parameters = list(default_parameters.keys())
check_parameters.remove("strategy")
# ICA-AROMA only accept the non-aggressive strategy
# ignore user passed value
if "ica_aroma" in default_parameters:
check_parameters.remove("ica_aroma")
user_parameters, not_needed = _update_user_inputs(kwargs,
default_parameters,
check_parameters)
# raise warning about parameters not needed
if not_needed:
warnings.warn("The following parameters are not needed for the "
f"selected strategy '{denoise_strategy}': {not_needed}; "
f"parameters accepted: {check_parameters}"
)
return fmriprep_confounds(img_files, **user_parameters)
|
def fmriprep_confounds_strategy(img_files, denoise_strategy="simple",
**kwargs):
"""
Use preset strategy to load confounds from :term:`fMRIPrep`.
`fmriprep_confounds_strategy` provides an interface to select confounds
based on past literature with limited parameters for user customisation.
.. versionadded:: 0.8.2
Parameters
----------
img_files : path to processed image files, optionally as a list.
Processed nii.gz/dtseries.nii/func.gii file reside in a
        :term:`fMRIPrep` generated functional derivative directory (i.e. the
        associated confound files should be in the same directory as the image
        file). As long as the image file, confound related tsv and json are in
        the same directory with BIDS-compliant names, `fmriprep_confounds` can
retrieve the relevant files correctly.
- `nii.gz` or `dtseries.nii`: path to files, optionally as a list.
- `func.gii`: list of a pair of paths to files, optionally as a list
of lists.
denoise_strategy : {'simple', 'srubbing', 'compcor', 'ica_aroma'}
Name of preset denoising strategies. Each strategy has a set of
associated configurable parameters. For the documentation on
additional parameters, please refer to
:func:`nilearn.input_data.fmriprep_confounds`.
        - 'simple': Load confounds for a simple denoising strategy commonly
used in resting state functional connectivity, described in
:footcite:`Fox2005`. Default as: full motion parameters,
full WM/CSF signals, and high pass filter, with an option to
extract global signal confounds.
Additional parameters: motion, wm_csf, global_signal, demean
        - 'srubbing': Load confounds for scrubbing described in
          :footcite:`Power2012`. Default as: full motion parameters,
full WM/CSF signals, remove segment smaller than 5 continuous
volumes (see docstring of
:func:`nilearn.input_data.fmriprep_confounds`),
high pass filter, with an option to extract global signal confounds.
Additional parameters: motion, wm_csf, scrub, fd_thresh,
std_dvars_thresh, global_signal, demean
- 'compcor': Load confounds using the CompCor strategy from
:footcite:`BEHZADI200790`. Default with full motion parameters,
high pass filter, and anatomical compcor with combined mask.
Additional parameters: motion, n_compcor, compcor, demean
        - 'ica_aroma': Load confounds for non-aggressive ICA-AROMA strategy
described in :footcite:`Pruim2015`. The strategy requires
:term:`fMRIPrep` outputs generated with `--use-aroma` suffixed with
`desc-smoothAROMAnonaggr_bold`. See notes for more details about
this option.
Additional parameters: wm_csf, global_signal, demean
Other keyword arguments:
See additional parameters associated with denoise_strategy and refer
to the documentation of :func:`nilearn.input_data.fmriprep_confounds`
Returns
-------
confounds : pandas.DataFrame, or list of
A reduced version of :term:`fMRIPrep` confounds based on selected
strategy and flags.
An intercept is automatically added to the list of confounds.
The columns contains the labels of the regressors.
sample_mask : None, numpy.ndarray, or list of
        When no volumes require removal, the value is None.
Otherwise, shape: (number of scans - number of volumes removed, )
The index of the niimgs along time/fourth dimension for valid volumes
for subsequent analysis.
This attribute should be passed to parameter `sample_mask` of
:class:`nilearn.input_data.NiftiMasker` or
:func:`nilearn.signal.clean`.
        Volumes are removed if flagged as follows:
- Non-steady-state volumes (if present)
- Motion outliers detected by scrubbing
Notes
-----
ICA-AROMA is implemented in two steps in :footcite:`Pruim2015`:
1. A non-aggressive denoising immediately after ICA classification.
A linear regression estimates signals with all independent
components as predictors. A partial regression is then applied to
remove variance associated with noise independent components.
:term:`fMRIPrep` performs this step and generates files in
`MNI152NLin6Asym` template, suffixed with
`desc-smoothAROMAnonaggr_bold`.
One can produce `desc-smoothAROMAnonaggr_bold` in other spatial
templates, please refer to :term:`fMRIPrep` documentation on ICA-AROMA
`<https://fmriprep.org/en/latest/workflows.html#ica-aroma>`_
2. Confound regression step (mean signals from WM and CSF).
Confound regressors generated by this function with
`denoise_strategy="ica_aroma"`.
    For more discussion on whether regressing the nuisance signals before or
    after ICA-AROMA denoising has a detrimental effect on outcome measures,
    please see notebook 5.
`<https://github.com/nipreps/fmriprep-notebooks/>`_
References
-----------
.. footbibliography::
"""
default_parameters = preset_strategies[denoise_strategy].copy()
check_parameters = list(default_parameters.keys())
check_parameters.remove("strategy")
# ICA-AROMA only accept the non-aggressive strategy
# ignore user passed value
if "ica_aroma" in default_parameters:
check_parameters.remove("ica_aroma")
user_parameters, not_needed = _update_user_inputs(kwargs,
default_parameters,
check_parameters)
# raise warning about parameters not needed
if not_needed:
warnings.warn("The following parameters are not needed for the "
f"selected strategy '{denoise_strategy}': {not_needed}; "
f"parameters accepted: {check_parameters}"
)
return fmriprep_confounds(img_files, **user_parameters)
|
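Both sides above implement the same flow: copy the preset for the chosen strategy, drop the keys a user may not override, merge the user's kwargs, and warn about anything left over (the visible change in the pair is the added space after the BEHZADI200790 citation). A self-contained sketch of that merge-and-warn pattern; `preset_strategies` and `update_user_inputs` below are hypothetical stand-ins, not nilearn's internals:

import warnings

# Hypothetical presets standing in for the module-level `preset_strategies`.
preset_strategies = {
    "simple": {"strategy": ["motion", "wm_csf", "high_pass"],
               "motion": "full", "wm_csf": "basic", "global_signal": None, "demean": True},
}

def update_user_inputs(kwargs, defaults, accepted):
    # keep accepted keys from kwargs, report the rest as not needed
    params = dict(defaults)
    not_needed = [k for k in kwargs if k not in accepted]
    params.update({k: v for k, v in kwargs.items() if k in accepted})
    return params, not_needed

defaults = preset_strategies["simple"].copy()
accepted = [k for k in defaults if k != "strategy"]
params, not_needed = update_user_inputs({"motion": "basic", "scrub": 5}, defaults, accepted)
if not_needed:
    warnings.warn(f"not needed for 'simple': {not_needed}; accepted: {accepted}", RuntimeWarning)
print(params["motion"])  # basic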
3,650 |
def _parse_size(size_str):
"""Convert memory size strings ('12 GB' etc.) to float"""
suffixes = {'': 1.0, 'b': 1.0,
'k': 1e3, 'm': 1e6, 'g': 1e9, 't': 1e12,
'kb': 1e3, 'mb': 1e6, 'gb': 1e9, 'tb': 1e12}
size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
'|'.join(suffixes.keys())), re.I)
m = size_re.match(size_str.lower())
if not m or m.group(2) not in suffixes:
raise ValueError("value {!r} not a valid size".format(size_str))
return float(m.group(1)) * suffixes[m.group(2)]
|
def _parse_size(size_str):
"""Convert memory size strings ('12 GB' etc.) to float"""
suffixes = {'': 1.0, 'b': 1.0,
'k': 1e3, 'm': 1e6, 'g': 1e9, 't': 1e12,
'kb': 1e3, 'mb': 1e6, 'gb': 1e9, 'tb': 1e12}
size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
'|'.join(suffixes.keys())), re.I)
m = size_re.match(size_str.lower())
if not m or m.group(2) not in suffixes:
raise ValueError("value {!r} not a valid size".format(size_str))
return int(float(m.group(1)) * suffixes[m.group(2)])
|
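The modified side casts the result to an int, so the parser now returns a whole number of bytes. A usage sketch with the function inlined so it runs standalone (same regex and suffix table as above):

import re

def parse_size(size_str):
    # same parsing as above; returns an integer number of bytes, as in the modified side
    suffixes = {'': 1.0, 'b': 1.0,
                'k': 1e3, 'm': 1e6, 'g': 1e9, 't': 1e12,
                'kb': 1e3, 'mb': 1e6, 'gb': 1e9, 'tb': 1e12}
    size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format('|'.join(suffixes)), re.I)
    m = size_re.match(size_str.lower())
    if not m or m.group(2) not in suffixes:
        raise ValueError("value {!r} not a valid size".format(size_str))
    return int(float(m.group(1)) * suffixes[m.group(2)])

print(parse_size("12 GB"))    # 12000000000
print(parse_size("1.5k"))     # 1500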
43,614 |
def sd_excitations(n_electrons, n_orbitals, delta_sz):
r"""Generates single and double excitations from a Hartree-Fock (HF) reference state
The singly- and doubly-excited configurations are generated by acting with the operators
:math:`\hat T_1` and :math:`\hat T_2` on the HF state:
    .. math::
&& \vert \Phi_\mathrm{S} \rangle = \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in
\mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}
\rangle \\
&& \vert \Phi_\mathrm{D} \rangle = \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in
\mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle
where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied,
referred to as virtual (virt), molecular orbitals and :math:`\hat c` and
:math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively.
**Example usage:**
    >>> ph, pphh = sd_excitations(2, 4, 0)
>>> print(ph)
[[0, 2], [1, 3]]
>>> print(pphh)
[[0, 1, 2, 3]]
Args:
n_electrons (int): number of active electrons
n_orbitals (int): number of active orbitals
delta_sz (int): spin-projection selection rule.
For single excitations ``sz[p] - sz[r] = delta_sz``.
            For double excitations ``sz[p] + sz[q] - sz[r] - sz[s] = delta_sz``.
``sz`` is the single-particle state spin quantum number and ``delta_sz``, in the
case of singles and doubles, can take the values :math:`0`, :math:`\pm 1`
and :math:`\pm 2`.
Returns:
tuple(list, list): lists with the indices of the molecular orbitals
involved in the single and double excitations
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be greater than 0 \n"
"Got n_electrons = {}".format(n_electrons)
)
if n_orbitals <= n_electrons:
raise ValueError(
"The number of active orbitals ({}) "
"has to be greater than the number of active electrons ({})."
.format(n_orbitals, n_electrons)
)
if int(delta_sz) not in (0, 1, -1, 2, -2):
raise ValueError(
"Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."
.format(delta_sz)
)
# define the spin quantum number 'sz' of each orbital
sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_orbitals)])
# nested list with the indices 'p, r' for each 1particle-1hole (ph) configuration
ph = [
[r, p]
for r in range(n_electrons) for p in range(n_electrons, n_orbitals)
if sz[p]-sz[r] == delta_sz
]
# nested list with the indices 's, r, q, p' for each 2particle-2hole (pphh) configuration
pphh = [
[s, r, q, p]
for s in range(n_electrons-1) for r in range(s+1, n_electrons)
for q in range(n_electrons, n_orbitals-1) for p in range(q+1, n_orbitals)
if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz
]
return ph, pphh
|
def sd_excitations(n_electrons, n_orbitals, delta_sz):
r"""Generates single and double excitations from a Hartree-Fock (HF) reference state.
The singly- and doubly-excited configurations are generated by acting with the operators
:math:`\hat T_1` and :math:`\hat T_2` on the HF state:
    .. math::
&& \vert \Phi_\mathrm{S} \rangle = \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in
\mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}
\rangle \\
&& \vert \Phi_\mathrm{D} \rangle = \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in
\mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle
where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied,
referred to as virtual (virt), molecular orbitals and :math:`\hat c` and
:math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively.
**Example usage:**
    >>> ph, pphh = sd_excitations(2, 4, 0)
>>> print(ph)
[[0, 2], [1, 3]]
>>> print(pphh)
[[0, 1, 2, 3]]
Args:
n_electrons (int): number of active electrons
n_orbitals (int): number of active orbitals
delta_sz (int): spin-projection selection rule.
For single excitations ``sz[p] - sz[r] = delta_sz``.
            For double excitations ``sz[p] + sz[q] - sz[r] - sz[s] = delta_sz``.
``sz`` is the single-particle state spin quantum number and ``delta_sz``, in the
case of singles and doubles, can take the values :math:`0`, :math:`\pm 1`
and :math:`\pm 2`.
Returns:
tuple(list, list): lists with the indices of the molecular orbitals
involved in the single and double excitations
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be greater than 0 \n"
"Got n_electrons = {}".format(n_electrons)
)
if n_orbitals <= n_electrons:
raise ValueError(
"The number of active orbitals ({}) "
"has to be greater than the number of active electrons ({})."
.format(n_orbitals, n_electrons)
)
if int(delta_sz) not in (0, 1, -1, 2, -2):
raise ValueError(
"Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."
.format(delta_sz)
)
# define the spin quantum number 'sz' of each orbital
sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_orbitals)])
# nested list with the indices 'p, r' for each 1particle-1hole (ph) configuration
ph = [
[r, p]
for r in range(n_electrons) for p in range(n_electrons, n_orbitals)
if sz[p]-sz[r] == delta_sz
]
# nested list with the indices 's, r, q, p' for each 2particle-2hole (pphh) configuration
pphh = [
[s, r, q, p]
for s in range(n_electrons-1) for r in range(s+1, n_electrons)
for q in range(n_electrons, n_orbitals-1) for p in range(q+1, n_orbitals)
if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz
]
return ph, pphh
|
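For the docstring example (2 electrons in 4 spin-orbitals, delta_sz = 0), the selection rules can be checked by hand: sz alternates +1/2, -1/2 over the orbitals, and only spin-conserving promotions survive. A plain-Python reproduction of that example, without the input validation or numpy:

n_electrons, n_orbitals, delta_sz = 2, 4, 0
sz = [0.5 if i % 2 == 0 else -0.5 for i in range(n_orbitals)]

ph = [[r, p]
      for r in range(n_electrons) for p in range(n_electrons, n_orbitals)
      if sz[p] - sz[r] == delta_sz]
pphh = [[s, r, q, p]
        for s in range(n_electrons - 1) for r in range(s + 1, n_electrons)
        for q in range(n_electrons, n_orbitals - 1) for p in range(q + 1, n_orbitals)
        if sz[p] + sz[q] - sz[r] - sz[s] == delta_sz]

print(ph)    # [[0, 2], [1, 3]]
print(pphh)  # [[0, 1, 2, 3]]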
29,529 |
def compile_filter(bpf_filter, iface=None, iface_type=None):
"""Asks Tcpdump to parse the filter, then build the matching
BPF bytecode using get_bpf_pointer.
"""
if not TCPDUMP:
raise Scapy_Exception("tcpdump is not available. Cannot use filter !")
fake_pcap = None
tcpdump_opts = [
conf.prog.tcpdump,
"-ddd",
"-s", str(MTU),
]
if iface_type:
fake_pcap = codecs.getdecoder('hex')(FAKE_PCAP % (iface_type, ))[0]
tcpdump_opts.append("-r-")
else:
tcpdump_opts.extend(
["-p", "-i", (conf.iface if iface is None else iface)]
)
tcpdump_opts.append(bpf_filter)
try:
process = subprocess.Popen(
tcpdump_opts,
stdin=subprocess.PIPE if fake_pcap else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
except OSError as ex:
raise Scapy_Exception("Failed to attach filter: %s" % ex)
if fake_pcap:
lines, err = process.communicate(fake_pcap)
else:
lines, err = process.communicate()
ret = process.returncode
if ret:
raise Scapy_Exception(
"Failed to attach filter: tcpdump returned: %s" % err
)
lines = lines.strip().split(b"\n")
return get_bpf_pointer(lines)
|
def compile_filter(bpf_filter, iface=None, iface_type=None):
"""Asks Tcpdump to parse the filter, then build the matching
BPF bytecode using get_bpf_pointer.
"""
if not TCPDUMP:
raise Scapy_Exception("tcpdump is not available. Cannot use filter !")
fake_pcap = None
tcpdump_opts = [
conf.prog.tcpdump,
"-ddd",
"-s", str(MTU),
]
if iface_type:
fake_pcap = codecs.getdecoder('hex')(FAKE_PCAP % (iface_type, ))[0]
tcpdump_opts.extend(["-r", "-"])
else:
tcpdump_opts.extend(
["-p", "-i", (conf.iface if iface is None else iface)]
)
tcpdump_opts.append(bpf_filter)
try:
process = subprocess.Popen(
tcpdump_opts,
stdin=subprocess.PIPE if fake_pcap else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
except OSError as ex:
raise Scapy_Exception("Failed to attach filter: %s" % ex)
if fake_pcap:
lines, err = process.communicate(fake_pcap)
else:
lines, err = process.communicate()
ret = process.returncode
if ret:
raise Scapy_Exception(
"Failed to attach filter: tcpdump returned: %s" % err
)
lines = lines.strip().split(b"\n")
return get_bpf_pointer(lines)
|
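The only change in the pair above is how the "read capture from stdin" option reaches tcpdump: the original appends the fused token "-r-", while the modified side passes the flag and its "-" argument as separate argv entries. A minimal look at the resulting argument vectors:

MTU = 65535
base = ["tcpdump", "-ddd", "-s", str(MTU)]

opts_append = base + ["-r-"]          # original: single fused token
opts_extend = base + ["-r", "-"]      # modified: flag and value as separate argv entries

print(opts_append)   # ['tcpdump', '-ddd', '-s', '65535', '-r-']
print(opts_extend)   # ['tcpdump', '-ddd', '-s', '65535', '-r', '-']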
44,811 |
def _traverse_stage(stage):
from pyspark.ml import Pipeline
from pyspark.ml.classification import OneVsRest
yield stage
if isinstance(stage, Pipeline):
original_sub_stages = stage.getStages()
if not isinstance(original_sub_stages, list):
raise ValueError(
f"Pipeline stages should be a list but get object {str(original_sub_stages)}"
)
for stage in original_sub_stages:
yield from _traverse_stage(stage)
elif isinstance(stage, OneVsRest):
yield from _traverse_stage(stage.getClassifier())
elif _is_parameter_search_estimator(stage):
yield from _traverse_stage(stage.getEstimator())
yield from _traverse_stage(stage.getEvaluator())
|
def _traverse_stage(stage):
from pyspark.ml import Pipeline
from pyspark.ml.classification import OneVsRest
yield stage
if isinstance(stage, Pipeline):
original_sub_stages = stage.getStages()
if not isinstance(original_sub_stages, list):
raise ValueError(
f"Pipeline stages should be a list, but found object {str(original_sub_stages)}"
)
for stage in original_sub_stages:
yield from _traverse_stage(stage)
elif isinstance(stage, OneVsRest):
yield from _traverse_stage(stage.getClassifier())
elif _is_parameter_search_estimator(stage):
yield from _traverse_stage(stage.getEstimator())
yield from _traverse_stage(stage.getEvaluator())
|
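_traverse_stage above is a recursive generator: it yields the stage itself, then recurses into Pipeline stages, OneVsRest classifiers, and parameter-search estimators/evaluators. A toy version covering the first two cases, with stand-in classes instead of pyspark.ml (the parameter-search branch is omitted):

class Pipeline:
    def __init__(self, stages):
        self._stages = stages
    def getStages(self):
        return self._stages

class OneVsRest:
    def __init__(self, classifier):
        self._classifier = classifier
    def getClassifier(self):
        return self._classifier

def traverse(stage):
    yield stage
    if isinstance(stage, Pipeline):
        for sub in stage.getStages():
            yield from traverse(sub)
    elif isinstance(stage, OneVsRest):
        yield from traverse(stage.getClassifier())

pipe = Pipeline(["scaler", OneVsRest("logreg")])
print([type(s).__name__ if not isinstance(s, str) else s for s in traverse(pipe)])
# ['Pipeline', 'scaler', 'OneVsRest', 'logreg']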
7,037 |
def get_localhost_install_target():
"""Returns the install target of localhost platform"""
localhost = get_platform('localhost')
return get_install_target_from_platform(localhost)
|
def get_localhost_install_target():
"""Returns the install target of localhost platform"""
localhost = get_platform()
return get_install_target_from_platform(localhost)
|
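The modified side drops the explicit 'localhost' argument, presumably because `get_platform()` already defaults to the localhost platform. A hypothetical stand-in (not the real API) just to show that the two call sites resolve identically under that assumption:

def get_platform(platform_name='localhost'):
    # hypothetical stand-in: the real function looks the platform up in configuration
    return {'name': platform_name, 'install target': platform_name}

def get_install_target_from_platform(platform):
    return platform['install target']

print(get_install_target_from_platform(get_platform('localhost')))  # localhost
print(get_install_target_from_platform(get_platform()))             # localhost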
10,753 |
def literally(obj):
"""Forces numba to take *obj* as an Literal value.
*obj* must be either a literal or an argument of the caller function, where
the argument must be bounded to a literal. The literal requirement
propagates up the call stack.
This function is intercepted by the compiler to alter its behavior to
wrap corresponding function parameters as ``Literal``. It has **no effect**
outside of nopython-mode (interpreter, and objectmode).
The current implementation detects literal arguments in two ways:
1. Scan uses of ``literally`` via a rewrite pass.
2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg``
to signal the dispatcher to treat the corresponding parameter
differently. This mode is to support indirect use (via a function call).
The execution semantic of this function is equivalent to an identity
function.
See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
"""
return obj
|
def literally(obj):
"""Forces numba to take *obj* as an Literal value.
*obj* must be either a literal or an argument of the caller function, where
the argument must be bounded to a literal. The literal requirement
propagates up the call stack.
This function is intercepted by the compiler to alter the compilation behavior to
wrap corresponding function parameters as ``Literal``. It has **no effect**
outside of nopython-mode (interpreter, and objectmode).
The current implementation detects literal arguments in two ways:
1. Scan uses of ``literally`` via a rewrite pass.
2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg``
to signal the dispatcher to treat the corresponding parameter
differently. This mode is to support indirect use (via a function call).
The execution semantic of this function is equivalent to an identity
function.
See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
"""
return obj
|
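The pair above only rewords one docstring sentence. As a usage illustration of what the docstring describes: calling `literally` on an argument inside a jitted function forces the dispatcher to specialize on that argument's literal value. A minimal sketch, assuming numba is installed; illustrative only:

from numba import njit, literally

@njit
def scaled(x, factor):
    # `factor` is promoted to a compile-time Literal; each distinct value of
    # `factor` gets its own specialization of `scaled`.
    return x * literally(factor)

print(scaled(3, 2))            # 6
print(len(scaled.signatures))  # 1 so far; grows as new literal values of `factor` are seen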
5,779 |
def fit(dist, data, shape_bounds=None, *, loc_bounds=None, scale_bounds=None,
optimizer=optimize.differential_evolution):
r"""Fit a discrete or continuous distribution to data
Given a distribution, data, and bounds for the shape parameters of the
distribution, return maximum likelihood estimates of the shape parameters.
Parameters
----------
dist : scipy.stats.rv_continuous or scipy.stats.rv_discrete
The object representing the distribution to be fit to the data.
data : 1D array_like
The data to which the distribution is to be fit. If the data contain
any of ``np.nan``, ``np.inf``, or -``np.inf``, the fit method will
raise a ``ValueError``.
shape_bounds : dict or sequence of tuples
If a dictionary, each key is the name of a shape parameter of the
distribution, and the corresponding value is a tuple containing the
lower and upper bound on that shape parameter. If the distribution is
defined only for a finite range of values of that parameter, no entry
for that parameter is required; e.g., some distributions have
parameters which must be on the interval [0, 1].
If a sequence, element *i* is a tuple containing the lower and upper
bound on the *i*\ th shape parameter of the distribution. In this case,
bounds for all distribution shape parameters must be provided.
If a shape parameter is to be held as fixed (e.g. if it is known), the
lower and upper bounds may be equal. If a user-provided lower or upper
bound is beyond a bound of the domain for which the distribution is
defined, the bound of the distribution's domain will replace the
user-provided value. Similarly, shape parameters which must be integral
will be constrained to integral values within the user-provided bounds.
loc_bounds : tuple, optional
Lower and upper bound on the distribution's location parameter ``loc``.
scale_bounds : tuple, optional
Lower and upper bound on the distribution's scale parameter ``scale``.
optimizer : callable, optional
`optimizer` is a callable that accepts the following positional
arguments.
fun : callable
The objective function to be optimized. `fun` accepts one argument
``x``, candidate shape parameters of the distribution, and returns
the negative log-likelihood function given ``x``, `dist`, and the
provided `data`.
The job of `optimizer` is to find values of the decision variables
            that minimize `fun`.
bounds : sequence of tuples
The bounds on values of the decision variables; each element will
be a tuple containing the lower and upper bound on a decision
variable.
If the distribution has any shape parameters that must be integral or
if the distribution is discrete and the location parameter is not
fixed, `optimizer` must also accept the following *keyword* argument.
integrality : array_like of bools
            For each decision variable, True if the decision variable
            must be constrained to integer values and False if the decision
variable is continuous.
`optimizer` must return an object, such as an instance of
`scipy.optimize.OptimizeResult`, which holds the optimal values of
the decision variables in an attribute ``x``. If attributes
``fun``, ``status``, or ``message`` are provided, they will be
included in the result object returned by `fit`.
Returns
-------
result : FitResult
An object with the following fields.
dist : scipy.stats.rv_continuous or scipy.stats.rv_discrete
The distribution object passed to `fit` as `dist`
data : 1D array_like
The data passed to `fit` as `data`
nllf : callable
            The negative log-likelihood function for the given `data`. Accepts a
single tuple containing the shapes, location, and scale of the
distribution.
params : namedtuple
A namedtuple containing the maximum likelihood estimates of the
shape parameters, location, and (if applicable) scale of the
distribution.
objective_val : float
The value of the negative log likelihood function evaluated with
the fitted shapes, i.e. ``result.nllf(result.params)``.
success : bool or None
Whether the optimizer considered the optimization to terminate
successfully or not.
message : str or None
Any status message provided by the optimizer.
See Also
--------
rv_continuous, rv_discrete
Notes
-----
Optimization is more likely to converge to the maximum likelihood estimate
when the user provides tight bounds containing the maximum likelihood
estimate. For example, when fitting a binomial distribution to data, the
number of experiments underlying each sample may be known, in which case
the corresponding shape parameter ``n`` can be fixed.
Examples
--------
Suppose we wish to fit a distribution to the following data.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> dist = stats.nbinom
>>> shapes = (5, 0.5)
>>> data = dist.rvs(*shapes, size=1000, random_state=rng)
Suppose we do not know how the data were generated, but we suspect that
it follows a negative binomial distribution with parameters *n* and *p*\.
(See `scipy.stats.nbinom`.) We believe that the parameter *n* was fewer
than 30, and we know that the parameter *p* must lie on the interval
[0, 1]. We record this information in a variable `shape_bounds` and pass
this information to `fit`.
>>> shape_bounds = [(0, 30), (0, 1)]
>>> res = stats.fit(dist, data, shape_bounds)
`fit` searches within the user-specified `shape_bounds` for the
values that best match the data (in the sense of maximum likelihood
estimation). In this case, it found shape values similar to those
from which the data were actually generated.
>>> res.params
FitShapes(n=5.0, p=0.5028157644634368, loc=0.0) # may vary
We can visualize the results by superposing the probability mass function
of the distribution (with the shapes fit to the data) over a normalized
histogram of the data.
>>> res.plot()
Note that the estimate for *n* was exactly integral; this is because
the domain of the `nbinom` PMF includes only integral *n*, and the `nbinom`
object "knows" that. `nbinom` also knows that the shape *p* must be a
value between 0 and 1. In such a case - when the domain of the distribution
with respect to a parameter is finite - we are not required to specify
bounds for the parameter.
>>> shape_bounds = {'n': (0, 30)} # omit parameter p using a `dict`
>>> res2 = stats.fit(dist, data, shape_bounds)
>>> np.testing.assert_allclose(res.params, res2.params)
True
If we wish to force the distribution to be fit with *n* fixed at 6, we can
set both the lower and upper bounds on *n* to 6. Note, however, that the
value of the objective function being optimized is worse (higher) in this
case.
>>> shape_bounds = {'n': (6, 6)} # fix parameter `n`
>>> res3 = stats.fit(dist, data, shape_bounds)
>>> res3.params
FitShapes(n=6.0, p=0.5486556076755706, loc=0.0) # may vary
>>> res3.objective_val > res.objective_val
True
    Note that the `optimizer` parameter allows us to control the optimizer
used to perform the fitting. The default optimizer is
`scipy.optimize.differential_evolution` with its own default settings,
but we can easily change these settings by creating our own optimizer
with different defaults.
>>> from scipy.optimize import differential_evolution
>>> def optimizer(fun, bounds, *, integrality):
... return differential_evolution(fun, bounds, strategy='best2bin',
... seed=rng, integrality=integrality)
>>> shape_bounds = [(0, 30), (0, 1)]
>>> res4 = stats.fit(dist, data, shape_bounds, optimizer=optimizer)
>>> res4.params
FitShapes(n=5.0, p=0.5032774713044523, loc=0.0) # may vary
"""
# --- Input Validation / Standardization --- #
# distribution input validation and information collection
if getattr(dist, "pdf", False): # can't use isinstance for types
loc_bounds = loc_bounds or (0, 0)
scale_bounds = scale_bounds or (1, 1)
loc_scale_integrality = [False, False]
discrete = False
elif getattr(dist, "pmf", False):
loc_bounds = loc_bounds or (0, 0)
loc_scale_integrality = [True]
discrete = True
else:
message = ("`dist` must be an instance of `rv_continuous` "
"or `rv_discrete.`")
raise ValueError(message)
# data input validation
data = np.asarray(data)
if data.ndim != 1:
message = "`data` must be exactly one-dimensional."
raise ValueError(message)
if not (np.issubdtype(data.dtype, np.number)
and np.all(np.isfinite(data))):
message = "All elements of `data` must be finite numbers."
raise ValueError(message)
# shape_bounds input validation and information collection
shape_info = dist._get_shape_info()
n_shapes = len(shape_info)
if shape_bounds is None or np.array(shape_bounds).size == 0:
shape_bounds = {}
if isinstance(shape_bounds, dict):
shape_bounds = shape_bounds.copy() # don't mutate the user's object
shape_bounds_array = np.empty((n_shapes, 2))
for i in range(n_shapes):
shape_name = shape_info[i].shape_name
shape_bound = (shape_bounds.pop(shape_name, None)
or shape_info[i].domain)
shape_bounds_array[i] = shape_bound
if shape_bounds:
message = ("Bounds provided for the following unrecognized shapes "
f"will be ignored: {', '.join(shape_bounds.keys())}")
warnings.warn(message, RuntimeWarning, stacklevel=2)
shape_bounds = shape_bounds_array
else:
shape_bounds = np.asarray(shape_bounds, dtype=float)
if shape_bounds.shape != (n_shapes, 2):
message = (f"`shape_bounds` must have {n_shapes} elements, tuples "
"containing the lower and upper bounds for the following "
f"shape parameters: {dist.shapes}")
raise ValueError(message)
validated_shape_bounds = []
for i in range(n_shapes):
name = shape_info[i].shape_name
user_bounds = shape_bounds[i]
shape_domain = shape_info[i].domain
integral = shape_info[i].integrality
bound_type = "shape_bounds"
combined = _combine_bounds(name, bound_type, user_bounds, shape_domain,
integral)
validated_shape_bounds.append(combined)
# loc_bounds and scale_bounds input validation
loc_bounds = _combine_bounds("loc", "loc_bounds", loc_bounds,
(-np.inf, np.inf), discrete)
loc_bounds = [loc_bounds]
if discrete and scale_bounds is not None:
message = (f"`{dist.name}` is an instance of `rv_discrete`, which "
"does not have `scale`. `scale_bounds` will be ignored.")
warnings.warn(message, RuntimeWarning)
if discrete:
scale_bounds = []
else:
scale_bounds = [_combine_bounds("scale", "scale_bounds", scale_bounds,
(0, np.inf), False)]
bounds = validated_shape_bounds + loc_bounds + scale_bounds
integrality = ([shape.integrality for shape in shape_info]
+ loc_scale_integrality)
# --- MLE Fitting --- #
def nllf(free_params, data=data): # bind data NOW
with np.errstate(invalid='ignore', divide='ignore'):
return dist._penalized_nnlf(free_params, data)
with np.errstate(invalid='ignore', divide='ignore'):
if np.any(integrality):
res = optimizer(nllf, bounds, integrality=integrality)
else:
res = optimizer(nllf, bounds)
return FitResult(dist, data, discrete, nllf, res)
|
def fit(dist, data, shape_bounds=None, *, loc_bounds=None, scale_bounds=None,
optimizer=optimize.differential_evolution):
r"""Fit a discrete or continuous distribution to data
Given a distribution, data, and bounds for the shape parameters of the
distribution, return maximum likelihood estimates of the shape parameters.
Parameters
----------
dist : scipy.stats.rv_continuous or scipy.stats.rv_discrete
The object representing the distribution to be fit to the data.
data : 1D array_like
The data to which the distribution is to be fit. If the data contain
any of ``np.nan``, ``np.inf``, or -``np.inf``, the fit method will
raise a ``ValueError``.
shape_bounds : dict or sequence of tuples
If a dictionary, each key is the name of a shape parameter of the
distribution, and the corresponding value is a tuple containing the
lower and upper bound on that shape parameter. If the distribution is
defined only for a finite range of values of that parameter, no entry
for that parameter is required; e.g., some distributions have
parameters which must be on the interval [0, 1].
If a sequence, element *i* is a tuple containing the lower and upper
bound on the *i*\ th shape parameter of the distribution. In this case,
bounds for all distribution shape parameters must be provided.
If a shape parameter is to be held as fixed (e.g. if it is known), the
lower and upper bounds may be equal. If a user-provided lower or upper
bound is beyond a bound of the domain for which the distribution is
defined, the bound of the distribution's domain will replace the
user-provided value. Similarly, shape parameters which must be integral
will be constrained to integral values within the user-provided bounds.
loc_bounds : tuple, optional
Lower and upper bound on the distribution's location parameter ``loc``.
scale_bounds : tuple, optional
Lower and upper bound on the distribution's scale parameter ``scale``.
optimizer : callable, optional
`optimizer` is a callable that accepts the following positional
arguments.
fun : callable
The objective function to be optimized. `fun` accepts one argument
``x``, candidate shape parameters of the distribution, and returns
the negative log-likelihood function given ``x``, `dist`, and the
provided `data`.
The job of `optimizer` is to find values of the decision variables
            that minimize `fun`.
bounds : sequence of tuples
The bounds on values of the decision variables; each element will
be a tuple containing the lower and upper bound on a decision
variable.
If the distribution has any shape parameters that must be integral or
if the distribution is discrete and the location parameter is not
fixed, `optimizer` must also accept the following *keyword* argument.
integrality : array_like of bools
            For each decision variable, True if the decision variable
            must be constrained to integer values and False if the decision
variable is continuous.
`optimizer` must return an object, such as an instance of
`scipy.optimize.OptimizeResult`, which holds the optimal values of
the decision variables in an attribute ``x``. If attributes
``fun``, ``status``, or ``message`` are provided, they will be
included in the result object returned by `fit`.
Returns
-------
result : FitResult
An object with the following fields.
dist : scipy.stats.rv_continuous or scipy.stats.rv_discrete
The distribution object passed to `fit` as `dist`
data : 1D array_like
The data passed to `fit` as `data`
nllf : callable
            The negative log-likelihood function for the given `data`. Accepts a
single tuple containing the shapes, location, and scale of the
distribution.
params : namedtuple
A namedtuple containing the maximum likelihood estimates of the
shape parameters, location, and (if applicable) scale of the
distribution.
objective_val : float
The value of the negative log likelihood function evaluated with
the fitted shapes, i.e. ``result.nllf(result.params)``.
success : bool or None
Whether the optimizer considered the optimization to terminate
successfully or not.
message : str or None
Any status message provided by the optimizer.
See Also
--------
rv_continuous, rv_discrete
Notes
-----
Optimization is more likely to converge to the maximum likelihood estimate
when the user provides tight bounds containing the maximum likelihood
estimate. For example, when fitting a binomial distribution to data, the
number of experiments underlying each sample may be known, in which case
the corresponding shape parameter ``n`` can be fixed.
Examples
--------
Suppose we wish to fit a distribution to the following data.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> dist = stats.nbinom
>>> shapes = (5, 0.5)
>>> data = dist.rvs(*shapes, size=1000, random_state=rng)
Suppose we do not know how the data were generated, but we suspect that
it follows a negative binomial distribution with parameters *n* and *p*\.
(See `scipy.stats.nbinom`.) We believe that the parameter *n* was fewer
than 30, and we know that the parameter *p* must lie on the interval
[0, 1]. We record this information in a variable `shape_bounds` and pass
this information to `fit`.
>>> shape_bounds = [(0, 30), (0, 1)]
>>> res = stats.fit(dist, data, shape_bounds)
`fit` searches within the user-specified `shape_bounds` for the
values that best match the data (in the sense of maximum likelihood
estimation). In this case, it found shape values similar to those
from which the data were actually generated.
>>> res.params
FitShapes(n=5.0, p=0.5028157644634368, loc=0.0) # may vary
We can visualize the results by superposing the probability mass function
of the distribution (with the shapes fit to the data) over a normalized
histogram of the data.
>>> res.plot()
Note that the estimate for *n* was exactly integral; this is because
the domain of the `nbinom` PMF includes only integral *n*, and the `nbinom`
object "knows" that. `nbinom` also knows that the shape *p* must be a
value between 0 and 1. In such a case - when the domain of the distribution
with respect to a parameter is finite - we are not required to specify
bounds for the parameter.
>>> shape_bounds = {'n': (0, 30)} # omit parameter p using a `dict`
>>> res2 = stats.fit(dist, data, shape_bounds)
>>> np.testing.assert_allclose(res.params, res2.params)
True
If we wish to force the distribution to be fit with *n* fixed at 6, we can
set both the lower and upper bounds on *n* to 6. Note, however, that the
value of the objective function being optimized is worse (higher) in this
case.
>>> shape_bounds = {'n': (6, 6)} # fix parameter `n`
>>> res3 = stats.fit(dist, data, shape_bounds)
>>> res3.params
FitShapes(n=6.0, p=0.5486556076755706, loc=0.0) # may vary
>>> res3.objective_val > res.objective_val
True
Note that the `optimizer` parameter allows us to control the optimizer
used to perform the fitting. The default optimizer is
`scipy.optimize.differential_evolution` with its own default settings,
but we can easily change these settings by creating our own optimizer
with different defaults.
>>> from scipy.optimize import differential_evolution
>>> def optimizer(fun, bounds, *, integrality):
... return differential_evolution(fun, bounds, strategy='best2bin',
... seed=rng, integrality=integrality)
>>> shape_bounds = [(0, 30), (0, 1)]
>>> res4 = stats.fit(dist, data, shape_bounds, optimizer=optimizer)
>>> res4.params
FitShapes(n=5.0, p=0.5032774713044523, loc=0.0) # may vary
"""
# --- Input Validation / Standardization --- #
# distribution input validation and information collection
if getattr(dist, "pdf", False): # can't use isinstance for types
loc_bounds = loc_bounds or (0, 0)
scale_bounds = scale_bounds or (1, 1)
loc_scale_integrality = [False, False]
discrete = False
elif getattr(dist, "pmf", False):
loc_bounds = loc_bounds or (0, 0)
loc_scale_integrality = [True]
discrete = True
else:
message = ("`dist` must be an instance of `rv_continuous` "
"or `rv_discrete.`")
raise ValueError(message)
# data input validation
data = np.asarray(data)
if data.ndim != 1:
message = "`data` must be exactly one-dimensional."
raise ValueError(message)
if not (np.issubdtype(data.dtype, np.number)
and np.all(np.isfinite(data))):
message = "All elements of `data` must be finite numbers."
raise ValueError(message)
# shape_bounds input validation and information collection
shape_info = dist._get_shape_info()
n_shapes = len(shape_info)
if shape_bounds is None or np.array(shape_bounds).size == 0:
shape_bounds = {}
if isinstance(shape_bounds, dict):
shape_bounds = shape_bounds.copy() # don't mutate the user's object
shape_bounds_array = np.empty((n_shapes, 2))
for i in range(n_shapes):
shape_name = shape_info[i].shape_name
shape_bound = (shape_bounds.pop(shape_name, None)
or shape_info[i].domain)
shape_bounds_array[i] = shape_bound
if shape_bounds:
message = ("Bounds provided for the following unrecognized shapes "
f"will be ignored: {', '.join(shape_bounds.keys())}")
warnings.warn(message, RuntimeWarning, stacklevel=2)
shape_bounds = shape_bounds_array
else:
shape_bounds = np.asarray(shape_bounds, dtype=float)
if shape_bounds.shape != (n_shapes, 2):
message = (f"`shape_bounds` must have {n_shapes} elements, tuples "
"containing the lower and upper bounds for the following "
f"shape parameters: {dist.shapes}")
raise ValueError(message)
validated_shape_bounds = []
for i in range(n_shapes):
name = shape_info[i].shape_name
user_bounds = shape_bounds[i]
shape_domain = shape_info[i].domain
integral = shape_info[i].integrality
bound_type = "shape_bounds"
combined = _combine_bounds(name, bound_type, user_bounds, shape_domain,
integral)
validated_shape_bounds.append(combined)
# loc_bounds and scale_bounds input validation
loc_bounds = _combine_bounds("loc", "loc_bounds", loc_bounds,
(-np.inf, np.inf), discrete)
loc_bounds = [loc_bounds]
if discrete and scale_bounds is not None:
message = (f"`{dist.name}` is an instance of `rv_discrete`, which "
"does not have `scale`. `scale_bounds` will be ignored.")
warnings.warn(message, RuntimeWarning, stacklevel=2)
if discrete:
scale_bounds = []
else:
scale_bounds = [_combine_bounds("scale", "scale_bounds", scale_bounds,
(0, np.inf), False)]
bounds = validated_shape_bounds + loc_bounds + scale_bounds
integrality = ([shape.integrality for shape in shape_info]
+ loc_scale_integrality)
# --- MLE Fitting --- #
def nllf(free_params, data=data): # bind data NOW
with np.errstate(invalid='ignore', divide='ignore'):
return dist._penalized_nnlf(free_params, data)
with np.errstate(invalid='ignore', divide='ignore'):
if np.any(integrality):
res = optimizer(nllf, bounds, integrality=integrality)
else:
res = optimizer(nllf, bounds)
return FitResult(dist, data, discrete, nllf, res)
|
24,960 |
def get_global_option(
checker: BaseChecker,
option: GLOBAL_OPTION_NAMES,
default: T_GlobalOptionReturnTypes | None = None, # pylint: disable=unused-argument
) -> T_GlobalOptionReturnTypes | None:
"""DEPRECATED: Retrieve an option defined by the given *checker* or
by all known option providers.
It will look in the list of all options providers
until the given *option* will be found.
If the option wasn't found, the *default* value will be returned.
"""
warnings.warn(
"get_global_option has been deprecated. You can use "
"checker.linter.config to get all global options instead.",
DeprecationWarning,
)
return getattr(checker.linter.config, option.replace("-", "_"))
|
def get_global_option(
checker: BaseChecker,
option: GLOBAL_OPTION_NAMES,
default: T_GlobalOptionReturnTypes | None = None, # pylint: disable=unused-argument
) -> T_GlobalOptionReturnTypes | None: # pragma: no cover
"""DEPRECATED: Retrieve an option defined by the given *checker* or
by all known option providers.
It will look in the list of all options providers
until the given *option* will be found.
If the option wasn't found, the *default* value will be returned.
"""
warnings.warn(
"get_global_option has been deprecated. You can use "
"checker.linter.config to get all global options instead.",
DeprecationWarning,
)
return getattr(checker.linter.config, option.replace("-", "_"))
|
34,321 |
def get_rasa_sdk_version() -> Text:
"""Find out what the referenced version of the Rasa SDK is."""
env_file = project_root() / "requirements.txt"
with env_file.open() as f:
for line in f.readlines():
if "rasa-sdk" in line:
version = line.split("=")[-1]
return version.strip()
else:
raise Exception("Failed to find Rasa SDK version in requirements.txt")
|
def get_rasa_sdk_version() -> Text:
"""Find out what the referenced version of the Rasa SDK is."""
env_file = project_root() / "requirements.txt"
with env_file.open() as f:
for line in f:
if "rasa-sdk" in line:
version = line.split("=")[-1]
return version.strip()
else:
raise Exception("Failed to find Rasa SDK version in requirements.txt")
|
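Both versions above lean on Python's for/else: the else branch runs only if the loop finishes without returning. A minimal standalone sketch of the same scan (find_pin and the in-memory requirements text are made up for illustration):
import io

def find_pin(requirements_text, package):
    # Iterate the file object directly (as the second version does); the
    # for/else's else clause fires only if no line matched.
    with io.StringIO(requirements_text) as f:
        for line in f:
            if package in line:
                return line.split("=")[-1].strip()
        else:
            raise Exception(f"Failed to find {package} pin")

print(find_pin("rasa-sdk~=2.8.0\nnumpy==1.21.0\n", "rasa-sdk"))  # 2.8.0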
44,324 |
def config(metadata: dict = None, *,
# TODO: these can be typed more precisely
# Specifically, a Callable[A, B], where `B` is bound as a JSON type
encoder: Callable = None,
decoder: Callable = None,
mm_field: MarshmallowField = None,
letter_case: Callable[[str], str] = None,
undefined: Optional[Union[str, Undefined]] = None,
field_name: str = None,
exclude: Optional[Callable[[T], bool]] = None,
) -> Dict[str, dict]:
if metadata is None:
metadata = {}
lib_metadata = metadata.setdefault('dataclasses_json', {})
if encoder is not None:
lib_metadata['encoder'] = encoder
if decoder is not None:
lib_metadata['decoder'] = decoder
if mm_field is not None:
lib_metadata['mm_field'] = mm_field
if field_name is not None:
if letter_case is not None:
@functools.wraps(letter_case)
def override(_, _letter_case=letter_case, _field_name=field_name):
return _letter_case(_field_name)
else:
def override(_, _field_name=field_name):
return _field_name
letter_case = override
if letter_case is not None:
lib_metadata['letter_case'] = letter_case
if undefined is not None:
# Get the corresponding action for undefined parameters
if isinstance(undefined, str):
if not hasattr(Undefined, undefined.upper()):
valid_actions = list(action.name for action in Undefined)
raise UndefinedParameterError(
f"Invalid undefined parameter action, "
f"must be one of {valid_actions}")
undefined = Undefined[undefined.upper()]
lib_metadata['undefined'] = undefined
if exclude is not None:
lib_metadata['exclude'] = exclude
return metadata
|
def config(metadata: dict = None, *,
# TODO: these can be typed more precisely
# Specifically, a Callable[A, B], where `B` is bound as a JSON type
encoder: Callable = None,
decoder: Callable = None,
mm_field: MarshmallowField = None,
letter_case: Callable[[str], str] = None,
undefined: Optional[Union[str, Undefined]] = None,
field_name: str = None,
exclude: Optional[Callable[[Json], bool]] = None,
) -> Dict[str, dict]:
if metadata is None:
metadata = {}
lib_metadata = metadata.setdefault('dataclasses_json', {})
if encoder is not None:
lib_metadata['encoder'] = encoder
if decoder is not None:
lib_metadata['decoder'] = decoder
if mm_field is not None:
lib_metadata['mm_field'] = mm_field
if field_name is not None:
if letter_case is not None:
@functools.wraps(letter_case)
def override(_, _letter_case=letter_case, _field_name=field_name):
return _letter_case(_field_name)
else:
def override(_, _field_name=field_name):
return _field_name
letter_case = override
if letter_case is not None:
lib_metadata['letter_case'] = letter_case
if undefined is not None:
# Get the corresponding action for undefined parameters
if isinstance(undefined, str):
if not hasattr(Undefined, undefined.upper()):
valid_actions = list(action.name for action in Undefined)
raise UndefinedParameterError(
f"Invalid undefined parameter action, "
f"must be one of {valid_actions}")
undefined = Undefined[undefined.upper()]
lib_metadata['undefined'] = undefined
if exclude is not None:
lib_metadata['exclude'] = exclude
return metadata
|
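`config()` above only assembles a metadata dict under the 'dataclasses_json' key; a minimal usage sketch (the Person dataclass is made up, and this assumes the dataclasses_json package is installed):
from dataclasses import dataclass, field

from dataclasses_json import config, dataclass_json

@dataclass_json
@dataclass
class Person:
    # Rename the attribute to "fullName" in JSON via the metadata built by config().
    name: str = field(metadata=config(field_name="fullName"))

print(Person("Ada").to_json())                   # {"fullName": "Ada"}
print(Person.from_json('{"fullName": "Ada"}'))   # Person(name='Ada')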
32,457 |
def watchlist_update_action_command(client: Client, id: str, action_type: str,
enabled: bool) -> CommandResults:
if enabled == 'True':
enabled = True
elif enabled == 'False':
enabled = False
params = assign_params(enabled=enabled)
res = client.http_request(url=f'/v1/watchlist/{id}/action_type/{action_type}', method='PUT', json_data=params)
# res contains whether the task successful.
return CommandResults(readable_output=res.get('result'))
|
def watchlist_update_action_command(client: Client, id: str, action_type: str,
enabled: str) -> CommandResults:
if enabled == 'True':
enabled = True
elif enabled == 'False':
enabled = False
params = assign_params(enabled=enabled)
res = client.http_request(url=f'/v1/watchlist/{id}/action_type/{action_type}', method='PUT', json_data=params)
# res contains whether the task successful.
return CommandResults(readable_output=res.get('result'))
|
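Both versions coerce the 'True'/'False' strings by hand before calling assign_params; the same coercion in isolation (to_bool is a made-up helper, not part of the integration):
def to_bool(value):
    # Only the exact strings 'True' and 'False' are converted, mirroring the
    # if/elif above; anything else passes through unchanged.
    return {"True": True, "False": False}.get(value, value)

print(to_bool("True"), to_bool("False"), to_bool("maybe"))  # True False maybe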
29,807 |
def get_next_x_cron_runs(num_runs, schedule, start_datetime):
iter = croniter(schedule, start_datetime)
next_runs = []
for _ in range(num_runs):
next_runs.append(iter.get_next(datetime))
return next_runs
|
def get_next_x_cron_runs(num_runs, schedule, start_datetime):
iter = croniter(schedule, start_datetime)
return [iter.get_next(datetime) for _ in range(num_runs)]
|
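The rewrite above is behavior-preserving. For reference, a self-contained use of the same croniter API (the schedule and start time are arbitrary examples):
from datetime import datetime

from croniter import croniter

def next_runs(schedule, start, count):
    it = croniter(schedule, start)
    # get_next(datetime) yields successive fire times as datetime objects.
    return [it.get_next(datetime) for _ in range(count)]

print(next_runs("*/15 * * * *", datetime(2024, 1, 1, 0, 0), 3))
# -> 00:15, 00:30 and 00:45 on 2024-01-01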
30,517 |
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
chosen_urls = []
params['feed_url_to_config'] = {}
sources = params['feed_source']
if 'Last 30 Days' in sources:
params['feed_url_to_config']['https://feodotracker.abuse.ch/downloads/ipblocklist.csv'] = {
"indicator_type": FeedIndicatorType.IP,
"indicator": {
"regex": r"^.+,(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",
"transform": "\\1"
},
"fields": [{
'creationdate': {
"regex": r"^(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})",
"transform": "\\1"
},
"port": {
"regex": r"^.+,.+,(\d{1,5}),",
"transform": "\\1"
},
"updatedate": {
"regex": r"^.+,.+,.+,(\d{4}-\d{2}-\d{2})",
"transform": "\\1"
},
"malwarefamily": {
"regex": r"^.+,.+,.+,.+,(.+)",
"transform": "\\1"
}
}],
}
chosen_urls.append('https://feodotracker.abuse.ch/downloads/ipblocklist.csv')
if 'Currently Active' in sources:
params['feed_url_to_config']["https://feodotracker.abuse.ch/downloads/ipblocklist_recommended.txt"] = {
"indicator_type": FeedIndicatorType.IP,
"indicator": {
"regex": r"^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",
"transform": "\\1"
},
"ignore_regex": '#*'
}
chosen_urls.append('https://feodotracker.abuse.ch/downloads/ipblocklist_recommended.txt')
params['ignore_regex'] = '#'
params['url'] = chosen_urls
params['custom_fields_mapping'] = {
"creationdate": "creationdate",
"port": "port",
"updatedate": "updatedate",
"malwarefamily": "malwarefamily"
}
# Call the main execution of the HTTP API module.
feed_main('Feodo Tracker IP Blocklist Feed', params, 'feodotrackeripblocklist-')
|
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
chosen_urls = []
params['feed_url_to_config'] = {}
sources = params['feed_source']
if 'Last 30 Days' in sources:
params['feed_url_to_config']['https://feodotracker.abuse.ch/downloads/ipblocklist.csv'] = {
"indicator_type": FeedIndicatorType.IP,
"indicator": {
"regex": r"^.+,(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",
"transform": "\\1"
},
"fields": [{
'creationdate': {
"regex": r"^(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})",
"transform": "\\1"
},
"port": {
"regex": r"^.+,.+,(\d{1,5}),",
"transform": "\\1"
},
"updatedate": {
"regex": r"^.+,.+,.+,(\d{4}-\d{2}-\d{2})",
"transform": "\\1"
},
"malwarefamily": {
"regex": r"^.+,.+,.+,.+,(.+)",
"transform": "\\1"
}
}],
}
chosen_urls.append('https://feodotracker.abuse.ch/downloads/ipblocklist.csv')
if 'Currently Active' in sources:
params['feed_url_to_config']["https://feodotracker.abuse.ch/downloads/ipblocklist_recommended.txt"] = {
"indicator_type": FeedIndicatorType.IP,
"indicator": {
"regex": r"^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",
"transform": "\\1"
},
"ignore_regex": '#*'
}
chosen_urls.append('https://feodotracker.abuse.ch/downloads/ipblocklist_recommended.txt')
params['ignore_regex'] = '#'
params['url'] = chosen_urls
params['custom_fields_mapping'] = {
"creationdate": "creationdate",
"port": "port",
"updatedate": "updatedate",
"malwarefamily": "malwarefamily"
}
# Call the main execution of the HTTP API module.
feed_main('Feodo Tracker IP Blocklist Feed', params, 'feodotracker-ipblocklist-')
|
55,649 |
def ssim(
img1: torch.Tensor,
img2: torch.Tensor,
window_size: int,
max_val: float = 1.0,
eps: float = 1e-12,
cropping: bool = False,
) -> torch.Tensor:
r"""Function that computes the Structural Similarity (SSIM) index map between two images.
Measures the (SSIM) index between each element in the input `x` and target `y`.
The index can be described as:
.. math::
\text{SSIM}(x, y) = \frac{(2\mu_x\mu_y+c_1)(2\sigma_{xy}+c_2)}
{(\mu_x^2+\mu_y^2+c_1)(\sigma_x^2+\sigma_y^2+c_2)}
where:
- :math:`c_1=(k_1 L)^2` and :math:`c_2=(k_2 L)^2` are two variables to
stabilize the division with weak denominator.
- :math:`L` is the dynamic range of the pixel-values (typically this is
:math:`2^{\#\text{bits per pixel}}-1`).
Args:
img1: the first input image with shape :math:`(B, C, H, W)`.
img2: the second input image with shape :math:`(B, C, H, W)`.
window_size: the size of the gaussian kernel to smooth the images.
max_val: the dynamic range of the images.
eps: Small value for numerically stability when dividing.
cropping: Whether to crop out the "valid" convolution area to match
the MATLAB implementation of original SSIM paper.
Returns:
The ssim index map with shape :math:`(B, C, H, W)`.
Examples:
>>> input1 = torch.rand(1, 4, 5, 5)
>>> input2 = torch.rand(1, 4, 5, 5)
>>> ssim_map = ssim(input1, input2, 5) # 1x4x5x5
"""
if not isinstance(img1, torch.Tensor):
raise TypeError(f"Input img1 type is not a torch.Tensor. Got {type(img1)}")
if not isinstance(img2, torch.Tensor):
raise TypeError(f"Input img2 type is not a torch.Tensor. Got {type(img2)}")
if not isinstance(max_val, float):
raise TypeError(f"Input max_val type is not a float. Got {type(max_val)}")
if not len(img1.shape) == 4:
raise ValueError(f"Invalid img1 shape, we expect BxCxHxW. Got: {img1.shape}")
if not len(img2.shape) == 4:
raise ValueError(f"Invalid img2 shape, we expect BxCxHxW. Got: {img2.shape}")
if not img1.shape == img2.shape:
raise ValueError(f"img1 and img2 shapes must be the same. Got: {img1.shape} and {img2.shape}")
# prepare kernel
kernel: torch.Tensor = get_gaussian_kernel2d((window_size, window_size), (1.5, 1.5)).unsqueeze(0)
# compute coefficients
C1: float = (0.01 * max_val) ** 2
C2: float = (0.03 * max_val) ** 2
# compute local mean per channel
mu1: torch.Tensor = filter2d(img1, kernel)
mu2: torch.Tensor = filter2d(img2, kernel)
if cropping:
height, width = kernel.shape[-2:]
cropping_shape: List[int] = _compute_padding([height, width])
mu1 = _crop(mu1, cropping_shape)
mu2 = _crop(mu2, cropping_shape)
mu1_sq = mu1 ** 2
mu2_sq = mu2 ** 2
mu1_mu2 = mu1 * mu2
mu_img1_sq = filter2d(img1 ** 2, kernel)
mu_img2_sq = filter2d(img2 ** 2, kernel)
mu_img1_img2 = filter2d(img1 * img2, kernel)
if cropping:
mu_img1_sq = _crop(mu_img1_sq, cropping_shape)
mu_img2_sq = _crop(mu_img2_sq, cropping_shape)
mu_img1_img2 = _crop(mu_img1_img2, cropping_shape)
# compute local sigma per channel
sigma1_sq = mu_img1_sq - mu1_sq
sigma2_sq = mu_img2_sq - mu2_sq
sigma12 = mu_img1_img2 - mu1_mu2
# compute the similarity index map
num: torch.Tensor = (2.0 * mu1_mu2 + C1) * (2.0 * sigma12 + C2)
den: torch.Tensor = (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
return num / (den + eps)
|
def ssim(
img1: torch.Tensor,
img2: torch.Tensor,
window_size: int,
max_val: float = 1.0,
eps: float = 1e-12,
cropping: bool = False,
) -> torch.Tensor:
r"""Function that computes the Structural Similarity (SSIM) index map between two images.
Measures the (SSIM) index between each element in the input `x` and target `y`.
The index can be described as:
.. math::
\text{SSIM}(x, y) = \frac{(2\mu_x\mu_y+c_1)(2\sigma_{xy}+c_2)}
{(\mu_x^2+\mu_y^2+c_1)(\sigma_x^2+\sigma_y^2+c_2)}
where:
- :math:`c_1=(k_1 L)^2` and :math:`c_2=(k_2 L)^2` are two variables to
stabilize the division with weak denominator.
- :math:`L` is the dynamic range of the pixel-values (typically this is
:math:`2^{\#\text{bits per pixel}}-1`).
Args:
img1: the first input image with shape :math:`(B, C, H, W)`.
img2: the second input image with shape :math:`(B, C, H, W)`.
window_size: the size of the gaussian kernel to smooth the images.
max_val: the dynamic range of the images.
eps: Small value for numerically stability when dividing.
cropping: Whether to crop out the "valid" convolution area to match
the MATLAB implementation of original SSIM paper.
Returns:
The ssim index map with shape :math:`(B, C, H, W)`.
Examples:
>>> input1 = torch.rand(1, 4, 5, 5)
>>> input2 = torch.rand(1, 4, 5, 5)
>>> ssim_map = ssim(input1, input2, 5) # 1x4x5x5
"""
if not isinstance(img1, torch.Tensor):
raise TypeError(f"Input img1 type is not a torch.Tensor. Got {type(img1)}")
if not isinstance(img2, torch.Tensor):
raise TypeError(f"Input img2 type is not a torch.Tensor. Got {type(img2)}")
if not isinstance(max_val, float):
raise TypeError(f"Input max_val type is not a float. Got {type(max_val)}")
if not len(img1.shape) == 4:
raise ValueError(f"Invalid img1 shape, we expect BxCxHxW. Got: {img1.shape}")
if not len(img2.shape) == 4:
raise ValueError(f"Invalid img2 shape, we expect BxCxHxW. Got: {img2.shape}")
if not img1.shape == img2.shape:
raise ValueError(f"img1 and img2 shapes must be the same. Got: {img1.shape} and {img2.shape}")
# prepare kernel
kernel: torch.Tensor = get_gaussian_kernel2d((window_size, window_size), (1.5, 1.5)).unsqueeze(0)
# compute coefficients
C1: float = (0.01 * max_val) ** 2
C2: float = (0.03 * max_val) ** 2
# compute local mean per channel
mu1: torch.Tensor = filter2d(img1, kernel, pad="valid")
mu2: torch.Tensor = filter2d(img2, kernel, pad="valid")
if cropping:
height, width = kernel.shape[-2:]
cropping_shape: List[int] = _compute_padding([height, width])
mu1 = _crop(mu1, cropping_shape)
mu2 = _crop(mu2, cropping_shape)
mu1_sq = mu1 ** 2
mu2_sq = mu2 ** 2
mu1_mu2 = mu1 * mu2
mu_img1_sq = filter2d(img1 ** 2, kernel)
mu_img2_sq = filter2d(img2 ** 2, kernel)
mu_img1_img2 = filter2d(img1 * img2, kernel)
if cropping:
mu_img1_sq = _crop(mu_img1_sq, cropping_shape)
mu_img2_sq = _crop(mu_img2_sq, cropping_shape)
mu_img1_img2 = _crop(mu_img1_img2, cropping_shape)
# compute local sigma per channel
sigma1_sq = mu_img1_sq - mu1_sq
sigma2_sq = mu_img2_sq - mu2_sq
sigma12 = mu_img1_img2 - mu1_mu2
# compute the similarity index map
num: torch.Tensor = (2.0 * mu1_mu2 + C1) * (2.0 * sigma12 + C2)
den: torch.Tensor = (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
return num / (den + eps)
|
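As a sanity check on the SSIM formula quoted in both docstrings, here is a simplified NumPy sketch that computes one global SSIM value (a single window over the whole image with the usual k1/k2 defaults, instead of the Gaussian-filtered local map the functions above return; ssim_global is a made-up name):
import numpy as np

def ssim_global(x, y, max_val=1.0, k1=0.01, k2=0.03):
    c1, c2 = (k1 * max_val) ** 2, (k2 * max_val) ** 2
    mu_x, mu_y = x.mean(), y.mean()
    var_x, var_y = x.var(), y.var()
    cov_xy = ((x - mu_x) * (y - mu_y)).mean()
    num = (2 * mu_x * mu_y + c1) * (2 * cov_xy + c2)
    den = (mu_x ** 2 + mu_y ** 2 + c1) * (var_x + var_y + c2)
    return num / den

rng = np.random.default_rng(0)
a = rng.random((5, 5))
print(ssim_global(a, a))        # identical images -> 1.0
print(ssim_global(a, 1.0 - a))  # inverted image -> much lower value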
317 |
def bound(logp, *conditions, **kwargs):
"""
Bounds a log probability density with several conditions.
Parameters
----------
logp: float
*conditions: booleans
broadcast_conditions: bool (optional, default=True)
If True, broadcasts logp to match the largest shape of the conditions.
This is used e.g. in DiscreteUniform where logp is a scalar constant and the shape
is specified via the conditions.
If False, will return the same shape as logp.
This is used e.g. in Multinomial where broadcasting can lead to differences in the logp.
Returns
-------
logp with elements set to -inf where any condition is False
"""
# If called inside a model context, see if bounds check is disabled
model = modelcontext(kwargs.get("model"))
if model is not None:
if model.disable_bounds_check:
return logp
broadcast_conditions = kwargs.get("broadcast_conditions", True)
if broadcast_conditions:
alltrue = alltrue_elemwise
else:
alltrue = alltrue_scalar
return tt.switch(alltrue(conditions), logp, -np.inf)
|
def bound(logp, *conditions, **kwargs):
"""
Bounds a log probability density with several conditions.
Parameters
----------
logp: float
*conditions: booleans
broadcast_conditions: bool (optional, default=True)
If True, broadcasts logp to match the largest shape of the conditions.
This is used e.g. in DiscreteUniform where logp is a scalar constant and the shape
is specified via the conditions.
If False, will return the same shape as logp.
This is used e.g. in Multinomial where broadcasting can lead to differences in the logp.
Returns
-------
logp with elements set to -inf where any condition is False
"""
# If called inside a model context, see if bounds check is disabled
model = modelcontext(kwargs.get("model"))
if model is not None and model.disable_bounds_check:
if model.disable_bounds_check:
return logp
broadcast_conditions = kwargs.get("broadcast_conditions", True)
if broadcast_conditions:
alltrue = alltrue_elemwise
else:
alltrue = alltrue_scalar
return tt.switch(alltrue(conditions), logp, -np.inf)
|
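The final tt.switch(alltrue(conditions), logp, -np.inf) is the heart of both versions; the same masking expressed with plain NumPy (no Theano/PyMC3 needed) looks like:
import numpy as np

logp = np.array([-1.2, -0.5, -3.0])
conditions = [np.array([True, True, False]),
              np.array([True, False, True])]
# Elementwise AND of all conditions, then keep logp only where every one holds.
mask = np.logical_and.reduce(conditions)
print(np.where(mask, logp, -np.inf))  # [-1.2 -inf -inf]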
14,709 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
import pyatmo
homes_conf = config.get(CONF_HOMES)
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
home_data = HomeData(conf)
except pyatmo.NoDevice:
return
homes = []
rooms = {}
if homes_conf is not None:
for home_conf in homes_conf:
home = home_conf[CONF_NAME]
if home_conf[CONF_ROOMS] != []:
rooms[home] = home_conf[CONF_ROOMS]
homes.append(home)
else:
homes = home_data.get_home_names()
devices = []
for home in homes:
_LOGGER.debug("Setting up %s ...", home)
try:
room_data = ThermostatData(conf, home)
except pyatmo.NoDevice:
continue
for room_id in room_data.get_room_ids():
room_name = room_data.homedata.rooms[home][room_id]['name']
_LOGGER.debug("Setting up %s (%s) ...", room_name, room_id)
if home in rooms and room_name not in rooms[home]:
_LOGGER.debug("Excluding %s ...", room_name)
continue
_LOGGER.debug("Adding devices for room %s (%s) ...",
room_name, room_id)
devices.append(NetatmoThermostat(room_data, room_id))
add_entities(devices, True)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
import pyatmo
homes_conf = config.get(CONF_HOMES)
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
home_data = HomeData(conf)
except pyatmo.NoDevice:
return
homes = []
rooms = {}
if homes_conf is not None:
for home_conf in homes_conf:
home = home_conf[CONF_NAME]
if home_conf[CONF_ROOMS] != []:
rooms[home] = home_conf[CONF_ROOMS]
homes.append(home)
else:
homes = home_data.get_home_names()
devices = []
for home in homes:
_LOGGER.debug("Setting up %s ...", home)
try:
room_data = ThermostatData(auth, home)
except pyatmo.NoDevice:
continue
for room_id in room_data.get_room_ids():
room_name = room_data.homedata.rooms[home][room_id]['name']
_LOGGER.debug("Setting up %s (%s) ...", room_name, room_id)
if home in rooms and room_name not in rooms[home]:
_LOGGER.debug("Excluding %s ...", room_name)
continue
_LOGGER.debug("Adding devices for room %s (%s) ...",
room_name, room_id)
devices.append(NetatmoThermostat(room_data, room_id))
add_entities(devices, True)
|
42,972 |
def vibronic(
t: np.ndarray,
U1: np.ndarray,
r: np.ndarray,
U2: np.ndarray,
alpha: np.ndarray,
n_samples: int,
loss: float = 0.0,
) -> list:
r"""Generate samples for computing vibronic spectra.
This function applies two procedures for generating samples depending on the temperature at
which the vibronic spectra is computed. In the finite temperature procedure, 2N vacuum modes
are prepared with N being the number of normal modes in the molecule and the first and
second N modes correspond to the normal modes of the final and initial electronic states,
respectively. These 2N modes are treated with two-mode squeezing gates and then the following
gates are applied to the first N modes:
#. Interferometer ``U1``
#. Squeezing on all N modes with parameters ``r``
#. Interferometer ``U2``
#. Displacement on all N modes with parameters ``alpha``
In the last step, the number of photons in each of the 2N modes is measured.
In the zero temperature case, only N modes are considered. The four gates mentioned above are
applied to N vacuum states and then the number of photons in each mode is measured. This
makes the zero temperature sampling significantly faster than the finite temperature one.
However, to keep the sizes of the samples consistent and temperature independent, we add N
zeros to the end of each zero temperature sample at the very end of this function.
**Example usage:**
>>> t = np.array([0., 0., 0., 0., 0., 0., 0.])
>>> U1 = np.array(
>>> [[-0.07985219, 0.66041032, -0.19389188, 0.01340832, 0.70312675, -0.1208423, -0.10352726],
>>> [0.19216669, -0.12470466, -0.81320519, 0.52045174, -0.1066017, -0.06300751, -0.00376173],
>>> [0.60838109, 0.0835063, -0.14958816, -0.34291399, 0.06239828, 0.68753918, -0.07955415],
>>> [0.63690134, -0.03047939, 0.46585565, 0.50545897, 0.21194805, -0.20422433, 0.18516987],
>>> [0.34556293, 0.22562207, -0.1999159, -0.50280235, -0.25510781, -0.55793978, 0.40065893],
>>> [-0.03377431, -0.66280536, -0.14740447, -0.25725325, 0.6145946, -0.07128058, 0.29804963],
>>> [-0.24570365, 0.22402764, 0.003273, 0.19204683, -0.05125235, 0.3881131, 0.83623564],
>>> ]
>>> )
>>> r = np.array(
>>> [0.09721339, 0.07017918, 0.02083469, -0.05974357, -0.07487845, -0.1119975, -0.1866708]
>>> )
>>> U2 = np.array(
>>> [
>>> [-0.07012006, 0.14489772, 0.17593463, 0.02431155, -0.63151781, 0.61230046, 0.41087368],
>>> [0.5618538, -0.09931968, 0.04562272, 0.02158822, 0.35700706, 0.6614837, -0.326946],
>>> [-0.16560687, -0.7608465, -0.25644606, -0.54317241, -0.12822903, 0.12809274, -0.00597384],
>>> [0.01788782, 0.60430409, -0.19831443, -0.73270964, -0.06393682, 0.03376894, -0.23038293],
>>> [0.78640978, -0.11133936, 0.03160537, -0.09188782, -0.43483738, -0.4018141, 0.09582698],
>>> [-0.13664887, -0.11196486, 0.86353995, -0.19608061, -0.12313513, -0.08639263, -0.40251231],
>>> [-0.12060103, -0.01169781, -0.33937036, 0.34662981, -0.49895371, 0.03257453, -0.70709135],
>>> ]
>>> )
>>> alpha = np.array(
>>> [0.15938187, 0.10387399, 1.10301587, -0.26756921, 0.32194572, -0.24317402, 0.0436992]
>>> )
>>> vibronic(t, U1, r, U2, alpha, 2)
[[0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
Args:
t (array): two-mode squeezing parameters
U1 (array): first interferometer unitary matrix
r (array): squeezing parameters
U2 (array): second interferometer unitary matrix
alpha (array): displacement parameters
n_samples (int): number of samples to be generated
loss (float): fraction of generated photons that are lost while passing through device.
Parameter should range from ``loss=0`` (ideal noise-free GBS) to ``loss=1``.
Returns:
list[list[int]]: a list of samples from GBS
"""
if n_samples < 1:
raise ValueError("Number of samples must be at least one")
if not 0 <= loss <= 1:
raise ValueError("Loss parameter must take a value between zero and one")
n_modes = len(t)
# initialize engine
eng = sf.LocalEngine(backend="gaussian")
# initialize program objects
if np.all(t != 0):
gbs = sf.Program(n_modes * 2)
else:
gbs = sf.Program(n_modes)
# start circuit
# pylint: disable=expression-not-assigned,pointless-statement
with gbs.context as q:
# two-mode squeezing gates
if np.all(t != 0):
for i in range(n_modes):
sf.ops.S2gate(t[i]) | (q[i], q[i + n_modes])
# first interferometer
sf.ops.Interferometer(U1) | q[:n_modes]
# squeezing gates
for i in range(n_modes):
sf.ops.Sgate(r[i]) | q[i]
# second interferometer
sf.ops.Interferometer(U2) | q[:n_modes]
# displacement gates
for i in range(n_modes):
sf.ops.Dgate(alpha[i]) | q[i]
# apply loss
if loss:
for _q in q:
sf.ops.LossChannel(1 - loss) | _q
# measurement
sf.ops.MeasureFock() | q
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning, message="Cannot simulate non-")
# run the engine and generate samples
s = eng.run(gbs, run_options={"shots": n_samples}).samples
# end circuit
if n_samples == 1:
if np.all(t == 0):
s = np.pad(s, (0, n_modes), constant_values=(0)).tolist()
return [s]
if np.all(t == 0):
s = np.pad(s, ((0, 0), (0, n_modes)))
return s.tolist()
|
def vibronic(
t: np.ndarray,
U1: np.ndarray,
r: np.ndarray,
U2: np.ndarray,
alpha: np.ndarray,
n_samples: int,
loss: float = 0.0,
) -> list:
r"""Generate samples for computing vibronic spectra.
This function applies two procedures for generating samples depending on the temperature at
which the vibronic spectra is computed. In the finite temperature procedure, 2N vacuum modes
are prepared with N being the number of normal modes in the molecule. The first and
second N modes correspond to the normal modes of the final and initial electronic states,
respectively. These 2N modes are treated with two-mode squeezing gates and then the following
gates are applied to the first N modes:
#. Interferometer ``U1``
#. Squeezing on all N modes with parameters ``r``
#. Interferometer ``U2``
#. Displacement on all N modes with parameters ``alpha``
In the last step, the number of photons in each of the 2N modes is measured.
In the zero temperature case, only N modes are considered. The four gates mentioned above are
applied to N vacuum states and then the number of photons in each mode is measured. This
makes the zero temperature sampling significantly faster than the finite temperature one.
However, to keep the sizes of the samples consistent and temperature independent, we add N
zeros to the end of each zero temperature sample at the very end of this function.
**Example usage:**
>>> t = np.array([0., 0., 0., 0., 0., 0., 0.])
>>> U1 = np.array(
>>> [[-0.07985219, 0.66041032, -0.19389188, 0.01340832, 0.70312675, -0.1208423, -0.10352726],
>>> [0.19216669, -0.12470466, -0.81320519, 0.52045174, -0.1066017, -0.06300751, -0.00376173],
>>> [0.60838109, 0.0835063, -0.14958816, -0.34291399, 0.06239828, 0.68753918, -0.07955415],
>>> [0.63690134, -0.03047939, 0.46585565, 0.50545897, 0.21194805, -0.20422433, 0.18516987],
>>> [0.34556293, 0.22562207, -0.1999159, -0.50280235, -0.25510781, -0.55793978, 0.40065893],
>>> [-0.03377431, -0.66280536, -0.14740447, -0.25725325, 0.6145946, -0.07128058, 0.29804963],
>>> [-0.24570365, 0.22402764, 0.003273, 0.19204683, -0.05125235, 0.3881131, 0.83623564],
>>> ]
>>> )
>>> r = np.array(
>>> [0.09721339, 0.07017918, 0.02083469, -0.05974357, -0.07487845, -0.1119975, -0.1866708]
>>> )
>>> U2 = np.array(
>>> [
>>> [-0.07012006, 0.14489772, 0.17593463, 0.02431155, -0.63151781, 0.61230046, 0.41087368],
>>> [0.5618538, -0.09931968, 0.04562272, 0.02158822, 0.35700706, 0.6614837, -0.326946],
>>> [-0.16560687, -0.7608465, -0.25644606, -0.54317241, -0.12822903, 0.12809274, -0.00597384],
>>> [0.01788782, 0.60430409, -0.19831443, -0.73270964, -0.06393682, 0.03376894, -0.23038293],
>>> [0.78640978, -0.11133936, 0.03160537, -0.09188782, -0.43483738, -0.4018141, 0.09582698],
>>> [-0.13664887, -0.11196486, 0.86353995, -0.19608061, -0.12313513, -0.08639263, -0.40251231],
>>> [-0.12060103, -0.01169781, -0.33937036, 0.34662981, -0.49895371, 0.03257453, -0.70709135],
>>> ]
>>> )
>>> alpha = np.array(
>>> [0.15938187, 0.10387399, 1.10301587, -0.26756921, 0.32194572, -0.24317402, 0.0436992]
>>> )
>>> vibronic(t, U1, r, U2, alpha, 2)
[[0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
Args:
t (array): two-mode squeezing parameters
U1 (array): first interferometer unitary matrix
r (array): squeezing parameters
U2 (array): second interferometer unitary matrix
alpha (array): displacement parameters
n_samples (int): number of samples to be generated
loss (float): fraction of generated photons that are lost while passing through device.
Parameter should range from ``loss=0`` (ideal noise-free GBS) to ``loss=1``.
Returns:
list[list[int]]: a list of samples from GBS
"""
if n_samples < 1:
raise ValueError("Number of samples must be at least one")
if not 0 <= loss <= 1:
raise ValueError("Loss parameter must take a value between zero and one")
n_modes = len(t)
# initialize engine
eng = sf.LocalEngine(backend="gaussian")
# initialize program objects
if np.all(t != 0):
gbs = sf.Program(n_modes * 2)
else:
gbs = sf.Program(n_modes)
# start circuit
# pylint: disable=expression-not-assigned,pointless-statement
with gbs.context as q:
# two-mode squeezing gates
if np.all(t != 0):
for i in range(n_modes):
sf.ops.S2gate(t[i]) | (q[i], q[i + n_modes])
# first interferometer
sf.ops.Interferometer(U1) | q[:n_modes]
# squeezing gates
for i in range(n_modes):
sf.ops.Sgate(r[i]) | q[i]
# second interferometer
sf.ops.Interferometer(U2) | q[:n_modes]
# displacement gates
for i in range(n_modes):
sf.ops.Dgate(alpha[i]) | q[i]
# apply loss
if loss:
for _q in q:
sf.ops.LossChannel(1 - loss) | _q
# measurement
sf.ops.MeasureFock() | q
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning, message="Cannot simulate non-")
# run the engine and generate samples
s = eng.run(gbs, run_options={"shots": n_samples}).samples
# end circuit
if n_samples == 1:
if np.all(t == 0):
s = np.pad(s, (0, n_modes), constant_values=(0)).tolist()
return [s]
if np.all(t == 0):
s = np.pad(s, ((0, 0), (0, n_modes)))
return s.tolist()
|
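The closing lines of both versions pad zero-temperature samples from N to 2N modes so that sample length is temperature independent; a tiny NumPy illustration of that padding step, with made-up samples for N = 7 modes:
import numpy as np

n_modes = 7
samples = np.array([[0, 0, 2, 0, 1, 0, 0],
                    [0, 0, 2, 0, 0, 0, 0]])
# Append n_modes zero columns, matching the finite-temperature sample length.
padded = np.pad(samples, ((0, 0), (0, n_modes)))
print(padded.shape)  # (2, 14)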
24,982 |
def load_results(
base: str, pylint_home: str | Path = PYLINT_HOME
) -> LinterStats | None:
data_file = _get_pdata_path(base, 1, pylint_home)
try:
with open(data_file, "rb") as stream:
data = pickle.load(stream)
# TODO Remove in 3.0 # pylint: disable=fixme
if not isinstance(data, LinterStats):
warnings.warn(
f"Loaded the wrong type of stats {type(data)}, we need a "
f"LinterStats, this will become an error in 3.0.",
DeprecationWarning,
)
raise TypeError
return data
except Exception: # pylint: disable=broad-except
return None
|
def load_results(
base: str, pylint_home: str | Path = PYLINT_HOME
) -> LinterStats | None:
data_file = _get_pdata_path(base, 1, pylint_home)
try:
with open(data_file, "rb") as stream:
data = pickle.load(stream)
# TODO: 3.0: Remove deprecated if statement # pylint: disable=fixme
if not isinstance(data, LinterStats):
warnings.warn(
f"Loaded the wrong type of stats {type(data)}, we need a "
f"LinterStats, this will become an error in 3.0.",
DeprecationWarning,
)
raise TypeError
return data
except Exception: # pylint: disable=broad-except
return None
|
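Only the TODO comment wording differs between the two versions above; the surrounding defensive-unpickling pattern can be sketched on its own (load_stats and the dict stand-in for LinterStats are made up, and the path is hypothetical):
import pickle
import warnings

def load_stats(path):
    """Return the pickled stats object, or None if anything goes wrong."""
    try:
        with open(path, "rb") as stream:
            data = pickle.load(stream)
        if not isinstance(data, dict):  # stand-in for the LinterStats check
            warnings.warn(f"Loaded the wrong type of stats {type(data)}",
                          DeprecationWarning)
            raise TypeError
        return data
    except Exception:  # I/O, unpickling or type failure all mean "no stats"
        return None

print(load_stats("does-not-exist.stats"))  # None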
25,940 |
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes,', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.')
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
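# Illustrative usage (resource names are placeholders):
#   az vm availability-set create -g MyRG -n MyAvSet --platform-fault-domain-count 2 --platform-update-domain-count 5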
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(self.get_models('InGuestPatchMode')), min_api='2020-06-01',
help='Mode of in-guest patching to IaaS virtual machine. Possible values are: Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. The parameters --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('count', type=int, help='How many virtual machines you want to create.')
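# Illustrative usage of a basic create with the arguments above (names and image are placeholders):
#   az vm create -g MyRG -n MyVm --image UbuntuLTS --admin-username azureuser --generate-ssh-keys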
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
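# Illustrative usage (names are placeholders):
#   az vm open-port -g MyRG -n MyVm --port 80-100 --priority 900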
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run slowly.')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
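# Illustrative usage (key vault name is a placeholder):
#   az vm encryption enable -g MyRG -n MyVm --disk-encryption-keyvault MyVault --volume-type ALL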
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. For example, as produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/en-us/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
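# Illustrative usage of the dedicated host arguments above (names and SKU are placeholders):
#   az vm host group create -g MyRG -n MyHostGroup --platform-fault-domain-count 2 --automatic-placement true
#   az vm host create -g MyRG --host-group MyHostGroup -n MyHost --sku DSv3-Type1 -d 0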
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Defaults to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
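# Illustrative usage of a basic scale set create (names and image are placeholders):
#   az vmss create -g MyRG -n MyVmss --image UbuntuLTS --instance-count 3 --vm-sku Standard_DS1_v2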
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="Domain name of VM instances. Once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
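# Illustrative usage of the automatic repairs arguments above (names are placeholders):
#   az vmss update -g MyRG -n MyVmss --enable-automatic-repairs true --automatic-repairs-grace-period 30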
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) that a notification is sent to the VM on the instance metadata server before the VM gets deleted')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
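# Illustrative usage (service name and action values come from the enums above):
#   az vmss set-orchestration-service-state -g MyRG -n MyVmss --service-name AutomaticRepairs --action Suspend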
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
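# Illustrative usage of run-command on a VM (script content is a placeholder):
#   az vm run-command invoke -g MyRG -n MyVm --command-id RunShellScript --scripts "echo hello"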
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default.', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
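# Illustrative usage (time is UTC, hhmm; names are placeholders):
#   az vm auto-shutdown -g MyRG -n MyVm --time 1730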
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='Version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed without specifying an image version (i.e. using the latest version) will not use this image version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
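# Illustrative usage of image version creation from a managed image (names are placeholders):
#   az sig image-version create -g MyRG -r MyGallery -i MyImageDef -e 1.0.0 --managed-image MyImage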
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
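# Illustrative usage (name is a placeholder):
#   az ppg create -g MyRG -n MyPpg -t Standard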
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
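# Illustrative usage ('Percentage CPU' is a common VM metric; names are placeholders):
#   az vm monitor metrics tail -g MyRG -n MyVm --metrics "Percentage CPU"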
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
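# Illustrative usage sketch (assumed): a disk encryption set built from the arguments above could
# be created roughly as
#   az disk-encryption-set create -g MyGroup -n MyDES --source-vault MyVault \
#       --key-url https://myvault.vault.azure.net/keys/mykey/<key-version>
# where the vault name, key URL and resource names are placeholders.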
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
|
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared gallery image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.')
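# Illustrative usage sketch (assumed, not from the source): the disk arguments above back
# invocations roughly like
#   az disk create -g MyGroup -n MyDisk --size-gb 128 --sku Premium_LRS
# where the names are placeholders; several flags above are gated by the min_api versions shown.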
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
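# Illustrative usage sketch (assumed): together with the shared 'snapshot create' source argument
# registered earlier, this supports commands roughly like
#   az snapshot create -g MyGroup -n MySnapshot --source MyDisk --incremental true
# where the names are placeholders.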
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all different image sources under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_output_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_output_type)
c.argument('shared_image_destinations', arg_type=ib_output_type)
c.argument('output_name', arg_type=ib_output_type)
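# Illustrative usage sketch (assumed): an image template combining the source, customizer and
# output arguments above might be created roughly as
#   az image builder create -g MyGroup -n MyTemplate --image-source UbuntuLTS \
#       --scripts https://example.com/setup.sh --managed-image-destinations MyImage=westus2
# where the names, script URL and image alias are placeholders and required parameters can vary
# by CLI version.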
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
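# Illustrative usage sketch (assumed): a shell customizer could be added to a template roughly as
#   az image builder customizer add -g MyGroup -n MyTemplate --customizer-name install-deps \
#       --type shell --script-url https://example.com/install.sh
# where the names and URL are placeholders; valid --type values come from the ScriptType enum above.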
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g. 'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' to update the OS disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(self.get_models('InGuestPatchMode')), min_api='2020-06-01',
help='Mode of in-guest patching to IaaS virtual machine. Possible values are: Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. The parameters --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('count', type=int, help='Count of virtual machines you want to create. Can only be used with --name and --resource-group-name.')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
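# Illustrative usage sketch (assumed): the open-port arguments above correspond to something like
#   az vm open-port -g MyGroup -n MyVM --port 80 --priority 900
# where the resource names are placeholders.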
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run slowly.')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
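# Illustrative usage sketch (assumed): with the attach arguments above, a new managed disk could
# be created and attached roughly as
#   az vm disk attach -g MyGroup --vm-name MyVM --name MyDisk --new --size-gb 128
# where the names are placeholders.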
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
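# Illustrative usage sketch (assumed): the filters above support queries roughly like
#   az vm list-skus -l westus2 --size Standard_D --zone --all -o table
# where the location and size prefix are placeholders.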
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/en-us/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances; once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
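# Illustrative usage sketch (assumed): the automatic-repairs arguments above map to something like
#   az vmss update -g MyGroup -n MyScaleSet --enable-automatic-repairs true --automatic-repairs-grace-period 30
# where the names are placeholders; per the min_api above this requires the 2018-10-01 API or later.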
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) that a terminate notification is sent to the VM on the instance metadata server before the VM gets deleted')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
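# Illustrative usage sketch (assumed): the run-command arguments above correspond roughly to
#   az vm run-command invoke -g MyGroup -n MyVM --command-id RunShellScript --scripts "echo hello"
# where RunShellScript is one example command id and the resource names are placeholders.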
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer to the system assigned identity, or a resource id to refer to a user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
                       help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<value1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default.', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
            c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
        c.argument('managed_image', help='image name (if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
                   help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
    # endregion
|
768 |
def test_GradientTable_btensor_calculation():
# Generate a gradient table without specifying b-tensors
gradients = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[3, 4, 0],
[5, 0, 12]], 'float')
gt = GradientTable(gradients)
# Check that the number of b tensors is correct
npt.assert_equal(gt.btens.shape[0], gt.bvals.shape[0])
for i, (bval, bvec, bten) in enumerate(zip(gt.bvals, gt.bvecs, gt.btens)):
# Check that the b tensor magnitude is correct
npt.assert_almost_equal(np.trace(bten), bval)
# Check that the b tensor orientation is correct
if bval != 0:
evals, evecs = np.linalg.eig(bten)
dot_prod = np.dot(np.real(evecs[:,np.argmax(evals)]), gt.bvecs[i])
npt.assert_almost_equal(np.abs(dot_prod), 1)
# Check btens input option 1
for btens in ['LTE', 'PTE', 'STE']:
gt = GradientTable(gradients, btens=btens)
# Check that the number of b tensors is correct
npt.assert_equal(gt.bvals.shape[0], gt.btens.shape[0])
for i, (bval, bvec, bten) in enumerate(zip(gt.bvals, gt.bvecs,
gt.btens)):
# Check that the b tensor magnitude is correct
npt.assert_almost_equal(np.trace(bten), bval)
# Check that the b tensor orientation is correct
if btens == 'LTE':
if bval != 0:
evals, evecs = np.linalg.eig(bten)
dot_prod = np.dot(np.real(evecs[:,np.argmax(evals)]),
gt.bvecs[i])
npt.assert_almost_equal(np.abs(dot_prod), 1)
# Check btens input option 2
btens = np.array(['LTE', 'PTE', 'STE', 'PTE', 'LTE', 'PTE'])
gt = GradientTable(gradients, btens=btens)
# Check that the number of b tensors is correct
npt.assert_equal(gt.bvals.shape[0], gt.btens.shape[0])
for i, (bval, bvec, bten) in enumerate(zip(gt.bvals, gt.bvecs,
gt.btens)):
# Check that the b tensor magnitude is correct
npt.assert_almost_equal(np.trace(bten), bval)
# Check that the b tensor orientation is correct
if btens[i] == 'LTE':
if bval != 0:
evals, evecs = np.linalg.eig(bten)
dot_prod = np.dot(np.real(evecs[:,np.argmax(evals)]),
gt.bvecs[i])
npt.assert_almost_equal(np.abs(dot_prod), 1)
# Check invalide
npt.assert_raises(ValueError, GradientTable, gradients=gradients,
btens='PPP')
npt.assert_raises(ValueError, GradientTable, gradients=gradients,
btens=np.zeros((10,10)))
|
def test_GradientTable_btensor_calculation():
# Generate a gradient table without specifying b-tensors
gradients = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[3, 4, 0],
[5, 0, 12]], 'float')
gt = GradientTable(gradients)
# Check that the number of b tensors is correct
npt.assert_equal(gt.btens.shape[0], gt.bvals.shape[0])
for i, (bval, bvec, bten) in enumerate(zip(gt.bvals, gt.bvecs, gt.btens)):
# Check that the b tensor magnitude is correct
npt.assert_almost_equal(np.trace(bten), bval)
# Check that the b tensor orientation is correct
if bval != 0:
evals, evecs = np.linalg.eig(bten)
dot_prod = np.dot(np.real(evecs[:,np.argmax(evals)]), gt.bvecs[i])
npt.assert_almost_equal(np.abs(dot_prod), 1)
# Check btens input option 1
for btens in ['LTE', 'PTE', 'STE']:
gt = GradientTable(gradients, btens=btens)
# Check that the number of b tensors is correct
npt.assert_equal(gt.bvals.shape[0], gt.btens.shape[0])
for i, (bval, bvec, bten) in enumerate(zip(gt.bvals, gt.bvecs,
gt.btens)):
# Check that the b tensor magnitude is correct
npt.assert_almost_equal(np.trace(bten), bval)
# Check that the b tensor orientation is correct
if btens == 'LTE':
if bval != 0:
evals, evecs = np.linalg.eig(bten)
dot_prod = np.dot(np.real(evecs[:,np.argmax(evals)]),
gt.bvecs[i])
npt.assert_almost_equal(np.abs(dot_prod), 1)
# Check btens input option 2
btens = np.array(['LTE', 'PTE', 'STE', 'PTE', 'LTE', 'PTE'])
gt = GradientTable(gradients, btens=btens)
# Check that the number of b tensors is correct
npt.assert_equal(gt.bvals.shape[0], gt.btens.shape[0])
for i, (bval, bvec, bten) in enumerate(zip(gt.bvals, gt.bvecs,
gt.btens)):
# Check that the b tensor magnitude is correct
npt.assert_almost_equal(np.trace(bten), bval)
# Check that the b tensor orientation is correct
if btens[i] == 'LTE':
if bval != 0:
evals, evecs = np.linalg.eig(bten)
dot_prod = np.dot(np.real(evecs[:,np.argmax(evals)]),
gt.bvecs[i])
npt.assert_almost_equal(np.abs(dot_prod), 1)
# Check invalid
npt.assert_raises(ValueError, GradientTable, gradients=gradients,
btens='PPP')
npt.assert_raises(ValueError, GradientTable, gradients=gradients,
btens=np.zeros((10,10)))
|
28,157 |
def test_missing_runs_raises(two_empty_temp_db_connections, some_paramspecs):
"""
Test that an error is raised if runs not present in the source DB are
attempted extracted
"""
source_conn, target_conn = two_empty_temp_db_connections
source_exp_1 = Experiment(conn=source_conn)
# make 5 runs in first experiment
exp_1_run_ids = []
for _ in range(5):
source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
exp_1_run_ids.append(source_dataset.run_id)
for ps in some_paramspecs[2].values():
source_dataset.add_parameter(ps)
for val in range(10):
source_dataset.add_result({ps.name: val
for ps in some_paramspecs[2].values()})
source_dataset.mark_complete()
source_path = path_to_dbfile(source_conn)
target_path = path_to_dbfile(target_conn)
run_ids = [1, 8, 5, 3, 2, 4, 4, 4, 7, 8]
wrong_ids = [8, 7, 8]
expected_err = ("Error: not all run_ids exist in the source database. "
"The following run(s) is/are not present: "
f"{wrong_ids}")
with pytest.raises(ValueError, match=re.escape(expected_err)):
extract_runs_into_db(source_path, target_path, *run_ids)
|
def test_missing_runs_raises(two_empty_temp_db_connections, some_paramspecs):
"""
Test that an error is raised if runs not present in the source DB are
attempted to be extracted
"""
source_conn, target_conn = two_empty_temp_db_connections
source_exp_1 = Experiment(conn=source_conn)
# make 5 runs in first experiment
exp_1_run_ids = []
for _ in range(5):
source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
exp_1_run_ids.append(source_dataset.run_id)
for ps in some_paramspecs[2].values():
source_dataset.add_parameter(ps)
for val in range(10):
source_dataset.add_result({ps.name: val
for ps in some_paramspecs[2].values()})
source_dataset.mark_complete()
source_path = path_to_dbfile(source_conn)
target_path = path_to_dbfile(target_conn)
run_ids = [1, 8, 5, 3, 2, 4, 4, 4, 7, 8]
wrong_ids = [8, 7, 8]
expected_err = ("Error: not all run_ids exist in the source database. "
"The following run(s) is/are not present: "
f"{wrong_ids}")
with pytest.raises(ValueError, match=re.escape(expected_err)):
extract_runs_into_db(source_path, target_path, *run_ids)
|
8,440 |
def identify_spectrum_format(filename):
""" Attempt to identify a spectrum file format
Given a filename, attempts to identify a valid file format
from the list of registered specutils loaders. Essentially a wrapper for
`~astropy.io.registry.identify_format` setting origin to `read` and
data_class to `Spectrum1D`.
Parameters
----------
filename : str
The absolute filename to the object
Returns
-------
valid_format : list | str
A list of valid file formats. If only one valid format found, returns
just that element.
"""
# check for valid string input
if not isinstance(filename, (str, pathlib.Path)) or not os.path.isfile(filename):
raise ValueError(f'{filename} is not a valid string path to a file')
# identify the file format
valid_format = io_registry.identify_format(
'read', Spectrum1D, filename, None, {}, {})
if valid_format and len(valid_format) == 1:
return valid_format[0]
return valid_format
|
def identify_spectrum_format(filename):
""" Attempt to identify a spectrum file format
Given a filename, attempts to identify a valid file format
from the list of registered specutils loaders. Essentially a wrapper for
`~astropy.io.registry.identify_format` setting origin to `read` and
data_class to `Spectrum1D`.
Parameters
----------
filename : str
The absolute filename to the object
Returns
-------
valid_format : list, str
A list of valid file formats. If only one valid format found, returns
just that element.
"""
# check for valid string input
if not isinstance(filename, (str, pathlib.Path)) or not os.path.isfile(filename):
raise ValueError(f'{filename} is not a valid string path to a file')
# identify the file format
valid_format = io_registry.identify_format(
'read', Spectrum1D, filename, None, {}, {})
if valid_format and len(valid_format) == 1:
return valid_format[0]
return valid_format
|
24,862 |
def my_func(self):
"""This is a docstring.
:returns: An object
:rtype: :class:`mymodule.Class`
"""
return mymodule.Class()
|
def my_func(self):
"""finds_sphinx_return_custom_class
:returns: An object
:rtype: :class:`mymodule.Class`
"""
return mymodule.Class()
|
31,972 |
def main():
args = demisto.args()
date_value = args.get('value')
formatter = args.get('formatter')
demisto.results(date_to_epoch(date_value, formatter))
|
def main():
args = demisto.args()
date_value = args['value']
formatter = args.get('formatter')
demisto.results(date_to_epoch(date_value, formatter))
|
41,487 |
def plot_results(ax, mutests, tests, test_size=0.05):
cls_obs = np.array([test[0] for test in tests]).flatten()
cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
ax.plot(mutests, cls_obs, c='k')
for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']):
ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed')
ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='y')
ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='g')
ax.plot(mutests, [test_size] * len(mutests), c='r')
ax.set_ylim(0, 1)
|
def plot_results(ax, mutests, tests, test_size=0.05):
cls_obs = np.array([test[0] for test in tests]).flatten()
cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
ax.plot(mutests, cls_obs, c='k')
for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']):
ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed')
ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='y')
ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='g')
ax.plot(mutests, [test_size] * len(mutests), c='red')
ax.set_ylim(0, 1)
|
10,334 |
def stringc(text, color, for_prompt=False):
"""String in color."""
if ANSIBLE_COLOR:
color_code = parsecolor(color)
fmt = u"\033[%sm%s\033[0m"
if for_prompt:
fmt = u"\001\033[%sm\002%s\001\033[0m\002"
return u"\n".join([fmt % (color_code, t) for t in text.split(u'\n')])
else:
return text
|
def stringc(text, color, for_prompt=False):
"""String in color."""
if ANSIBLE_COLOR:
color_code = parsecolor(color)
fmt = u"\033[%sm%s\033[0m"
if wrap_nonvisible_chars:
fmt = u"\001\033[%sm\002%s\001\033[0m\002"
return u"\n".join([fmt % (color_code, t) for t in text.split(u'\n')])
else:
return text
|
9,357 |
def test_wrap_var_string():
assert isinstance(wrap_var('foo'), AnsibleUnsafe)
assert isinstance(wrap_var(u'foo'), AnsibleUnsafe)
if PY3:
assert not isinstance(wrap_var(b'foo'), AnsibleUnsafe)
else:
assert isinstance(wrap_var(b'foo'), AnsibleUnsafe)
|
def test_wrap_var_string():
assert isinstance(wrap_var('foo'), AnsibleUnsafe)
assert isinstance(wrap_var(u'foo'), AnsibleUnsafe)
if PY3:
assert isinstance(wrap_var(b'foo'), type(b''))
else:
assert isinstance(wrap_var(b'foo'), AnsibleUnsafe)
|
1,230 |
def test_write_morph_data():
"""Test write_morph_data edge cases"""
values = np.arange(20, dtype='>f4')
okay_shapes = [(20,), (20, 1), (20, 1, 1), (1, 20)]
bad_shapes = [(10, 2), (1, 1, 20, 1, 1)]
big_num = np.iinfo('i4').max + 1
with InTemporaryDirectory():
for shape in okay_shapes:
write_morph_data('test.curv', values.reshape(shape))
# Check ordering is preserved, regardless of shape
assert np.array_equal(read_morph_data('test.curv') ,values)
with pytest.raises(ValueError):
write_morph_data('test.curv', np.zeros(shape), big_num)
# Windows 32-bit overflows Python int
if np.dtype(np.int) != np.dtype(np.int32):
with pytest.raises(ValueError):
write_morph_data('test.curv', strided_scalar((big_num,)))
for shape in bad_shapes:
with pytest.raises(ValueError):
write_morph_data('test.curv', values.reshape(shape))
|
def test_write_morph_data():
"""Test write_morph_data edge cases"""
values = np.arange(20, dtype='>f4')
okay_shapes = [(20,), (20, 1), (20, 1, 1), (1, 20)]
bad_shapes = [(10, 2), (1, 1, 20, 1, 1)]
big_num = np.iinfo('i4').max + 1
with InTemporaryDirectory():
for shape in okay_shapes:
write_morph_data('test.curv', values.reshape(shape))
# Check ordering is preserved, regardless of shape
assert np.array_equal(read_morph_data('test.curv'), values)
with pytest.raises(ValueError):
write_morph_data('test.curv', np.zeros(shape), big_num)
# Windows 32-bit overflows Python int
if np.dtype(np.int) != np.dtype(np.int32):
with pytest.raises(ValueError):
write_morph_data('test.curv', strided_scalar((big_num,)))
for shape in bad_shapes:
with pytest.raises(ValueError):
write_morph_data('test.curv', values.reshape(shape))
|
5,441 |
def test_write_default():
"""
Test writing a default setting
"""
mock = MagicMock()
with patch.dict(macdefaults.__salt__, {"cmd.run_all": mock}):
macdefaults.write("com.apple.CrashReporter", "DialogType", "Server")
mock.assert_called_once_with(
'defaults write "com.apple.CrashReporter" "DialogType" -string' ' "Server"',
runas=None,
)
|
def test_write_default():
"""
Test writing a default setting
"""
mock = MagicMock()
with patch.dict(macdefaults.__salt__, {"cmd.run_all": mock}):
macdefaults.write("com.apple.CrashReporter", "DialogType", "Server")
mock.assert_called_once_with(
'defaults write "com.apple.CrashReporter" "DialogType" -string "Server"',
runas=None,
)
|
11,650 |
def _clean_paths(root: pathlib.Path) -> None:
def issubdir(path):
"""Whether path is equal to or is a subdirectory of root."""
path = pathlib.PurePath(path)
return path == root or any(parent == root for parent in path.parents)
def subdirs(*suffixes):
"""Valid subdirectories of root."""
paths = map(root.joinpath, suffixes)
return [str(p) for p in paths if p.is_dir()]
def isvalidpath_win(path):
"""Whether an element of PATH is "clean" on Windows."""
        patterns = "*/cplex/*", "*/gurobi/*", "/windows/system32/*"
return any(map(pathlib.PurePath(path).match, patterns))
# Remove undesired paths from PYTHONPATH and add ilastik's submodules.
sys_path = list(filter(issubdir, sys.path))
sys_path += subdirs("ilastik/lazyflow", "ilastik/volumina", "ilastik/ilastik")
sys.path = sys_path
if sys.platform.startswith("win"):
# Empty PATH except for gurobi and CPLEX and add ilastik's installation paths.
path = list(filter(isvalidpath_win, _env_list("PATH")))
path += subdirs("Qt4/bin", "Library/bin", "Library/mingw-w64/bin", "python", "bin")
os.environ["PATH"] = os.pathsep.join(reversed(path))
else:
# Clean LD_LIBRARY_PATH and add ilastik's installation paths
# (gurobi and CPLEX are supposed to be located there as well).
ld_lib_path = list(filter(issubdir, _env_list("LD_LIBRARY_PATH")))
ld_lib_path += subdirs("lib")
os.environ["LD_LIBRARY_PATH"] = os.pathsep.join(reversed(ld_lib_path))
|
def _clean_paths(root: pathlib.Path) -> None:
def issubdir(path):
"""Whether path is equal to or is a subdirectory of root."""
path = pathlib.PurePath(path)
return path == root or any(parent == root for parent in path.parents)
def subdirs(*suffixes):
"""Valid subdirectories of root."""
paths = map(root.joinpath, suffixes)
return [str(p) for p in paths if p.is_dir()]
def isvalidpath_win(path):
"""Whether an element of PATH is "clean" on Windows."""
        patterns = "*/cplex/*", "*/gurobi/*", "/windows/system32/*"
return any(map(pathlib.PurePath(path).match, patterns))
# Remove undesired paths from PYTHONPATH and add ilastik's submodules.
sys_path = list(filter(issubdir, sys.path))
sys_path += subdirs("ilastik/lazyflow", "ilastik/volumina", "ilastik/ilastik")
sys.path = sys_path
if sys.platform.startswith("win"):
# Empty PATH except for gurobi and CPLEX and add ilastik's installation paths.
path = list(filter(isvalidpath_win, _env_list("PATH")))
path += subdirs("Library/bin", "Library/mingw-w64/bin", "python", "bin")
os.environ["PATH"] = os.pathsep.join(reversed(path))
else:
# Clean LD_LIBRARY_PATH and add ilastik's installation paths
# (gurobi and CPLEX are supposed to be located there as well).
ld_lib_path = list(filter(issubdir, _env_list("LD_LIBRARY_PATH")))
ld_lib_path += subdirs("lib")
os.environ["LD_LIBRARY_PATH"] = os.pathsep.join(reversed(ld_lib_path))
|
31,055 |
def viper_download(client, args):
file_hash = args.get('file_hash')
if len(file_hash) == 64:
sample_info = client.sample_information(file_hash)
sample = sample_download(file_hash)
if sample.status_code == 200:
filename = sample_info['data']['name']
viper_id = sample_info['data']['id']
mime = sample_info['data']['mime']
file_type = sample_info['data']['type']
size = sample_info['data']['size']
table_object = [{"File Name": filename, "File Hash": file_hash,
"ViperID": viper_id, "MIME": mime, "File Type": file_type, "Size": size}]
context_object = {'Viper': {"Name": filename, "SHA256": file_hash,
"ViperID": viper_id, "MIME": mime, "Type": file_type, "Size": size}}
demisto.results({'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': table_object, "EntryContext": context_object})
demisto.results(fileResult(filename, sample.content))
else:
return_error('No valid sample found')
else:
return_error('Hash length is invalid.')
|
def viper_download(client, args):
file_hash = args.get('file_hash')
if len(file_hash) == 64:
sample_info = client.sample_information(file_hash)
sample = sample_download(file_hash)
if sample.status_code == 200:
filename = sample_info['data']['name']
viper_id = sample_info['data']['id']
mime = sample_info['data']['mime']
file_type = sample_info['data']['type']
size = sample_info['data']['size']
table_object = [{"File Name": filename, "File Hash": file_hash,
"ViperID": viper_id, "MIME": mime, "File Type": file_type, "Size": size}]
context_object = {'Viper': {"Name": filename, "SHA256": file_hash,
"ViperID": viper_id, "MIME": mime, "Type": file_type, "Size": size}}
demisto.results({'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': table_object, "EntryContext": context_object})
demisto.results(fileResult(filename, sample.content))
else:
return_error('No valid sample found')
else:
raise DemistoException('Hash length is invalid.')
|
8,321 |
def abs_nix_path(x) -> str:
xs = x.split("=", 1)
if len(xs) == 1:
return _maybe_abspath(x)
return xs[0] + "=" + _maybe_abspath(xs[1])
|
def abs_nix_path(x: str) -> str:
xs = x.split("=", 1)
if len(xs) == 1:
return _maybe_abspath(x)
return xs[0] + "=" + _maybe_abspath(xs[1])
|
12,602 |
def construct_coverage_config(
source_roots: SourceRoots, python_files: List[str], test_time: Optional[bool] = False,
) -> str:
# A map from source root stripped source to its source root. eg:
# {'pants/testutil/subsystem/util.py': 'src/python'}
# This is so coverage reports referencing /chroot/path/pants/testutil/subsystem/util.py can be mapped
# back to the actual sources they reference when merging coverage reports.
init_files = list(identify_missing_init_files(list(python_files)))
def source_root_stripped_source_and_source_root(file_name):
source_root = source_roots.find_by_path(file_name)
source_root_stripped_path = file_name[len(source_root.path) + 1 :]
return (source_root_stripped_path, source_root.path)
source_to_target_base = dict(
source_root_stripped_source_and_source_root(filename)
for filename in sorted(python_files) + init_files
)
config_parser = configparser.ConfigParser()
config_parser.read_file(StringIO(DEFAULT_COVERAGE_CONFIG))
ensure_section(config_parser, "run")
config_parser.set("run", "plugins", COVERAGE_PLUGIN_MODULE_NAME)
config_parser.add_section(COVERAGE_PLUGIN_MODULE_NAME)
config_parser.set(
COVERAGE_PLUGIN_MODULE_NAME, "source_to_target_base", json.dumps(source_to_target_base)
)
config_parser.set(COVERAGE_PLUGIN_MODULE_NAME, "test_time", json.dumps(test_time))
config = StringIO()
config_parser.write(config)
return config.getvalue()
|
def construct_coverage_config(
source_roots: SourceRoots, python_files: List[str], test_time: Optional[bool] = False,
) -> str:
# A map from source root stripped source to its source root. eg:
# {'pants/testutil/subsystem/util.py': 'src/python'}
# This is so coverage reports referencing /chroot/path/pants/testutil/subsystem/util.py can be mapped
# back to the actual sources they reference when merging coverage reports.
init_files = identify_missing_init_files(python_files)
def source_root_stripped_source_and_source_root(file_name):
source_root = source_roots.find_by_path(file_name)
source_root_stripped_path = file_name[len(source_root.path) + 1 :]
return (source_root_stripped_path, source_root.path)
source_to_target_base = dict(
source_root_stripped_source_and_source_root(filename)
for filename in sorted(python_files) + init_files
)
config_parser = configparser.ConfigParser()
config_parser.read_file(StringIO(DEFAULT_COVERAGE_CONFIG))
ensure_section(config_parser, "run")
config_parser.set("run", "plugins", COVERAGE_PLUGIN_MODULE_NAME)
config_parser.add_section(COVERAGE_PLUGIN_MODULE_NAME)
config_parser.set(
COVERAGE_PLUGIN_MODULE_NAME, "source_to_target_base", json.dumps(source_to_target_base)
)
config_parser.set(COVERAGE_PLUGIN_MODULE_NAME, "test_time", json.dumps(test_time))
config = StringIO()
config_parser.write(config)
return config.getvalue()
|
36,483 |
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
spec = _find_spec(name, path)
if spec is None:
raise ModuleNotFoundError(_ERR_MSG.format(name), name=name)
else:
module = _load_unlocked(spec)
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
child = name.rpartition('.')[2]
try:
setattr(parent_module, child, module)
except AttributeError:
msg = (f"Can't set child package '{child}' on "
f"parent '{parent}'")
_warnings.warn(msg, ImportWarning)
return module
|
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
spec = _find_spec(name, path)
if spec is None:
raise ModuleNotFoundError(_ERR_MSG.format(name), name=name)
else:
module = _load_unlocked(spec)
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
child = name.rpartition('.')[2]
try:
setattr(parent_module, child, module)
except AttributeError:
msg = (f"Cannot set an attribute on {parent!r} for child module {child!r}"
f"parent '{parent}'")
_warnings.warn(msg, ImportWarning)
return module
|
49,860 |
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated from meteorological data using a function for a single diode model, e.g.,
:py:func:`~pvlib.pvsystem.calcparams_desoto`.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
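The docstring above defines ``nNsVth`` as the diode ideality factor times the number of series-connected cells times the thermal voltage :math:`k_B T_c / q`. A minimal sketch of building that argument (the ideality factor, cell count, and temperature below are assumed illustrative values, not taken from the source):

# Sketch only: n, Ns and T_c are assumed values for illustration.
from scipy.constants import Boltzmann, elementary_charge

n = 1.5                                    # assumed diode ideality factor
Ns = 72                                    # assumed number of cells in series
T_c = 298.15                               # assumed cell temperature [K] (25 C)

Vth = Boltzmann * T_c / elementary_charge  # thermal voltage k_B*T_c/q, about 0.0257 V
nNsVth = n * Ns * Vth                      # value passed as the nNsVth argument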
|
31,939 |
def retrieve_command_results_of_list_commands(response: Dict[str, Any], base_header: str,
page: int, limit: int, outputs_prefix: str) -> CommandResults:
"""
Retrieves the command results of list commands.
Args:
response (Dict[str,Any]): API response from Azure.
base_header: (str) Header prefix in the readable output.
page (int): Page number.
limit (int): Page size.
outputs_prefix (str): Command context outputs prefix.
Returns:
CommandResults: List Command results.
"""
response_kusto_dataset = KustoResponseDataSetV1(response)
total_rows = response_kusto_dataset.primary_results[0].rows_count
total_pages = total_rows // limit + 1
outputs = convert_kusto_response_to_dict(response_kusto_dataset, page, limit)
readable_header = format_header_for_list_commands(base_header,
total_rows, total_pages, page, limit)
readable_output = tableToMarkdown(readable_header,
outputs,
headers=['ClientActivityId', 'User', 'Text',
'Database', 'StartedOn',
'LastUpdatedOn',
'State'],
headerTransform=pascalToSpace)
command_results = CommandResults(
outputs_prefix=outputs_prefix,
outputs_key_field='ClientActivityId',
outputs=outputs,
raw_response=response,
readable_output=readable_output
)
return command_results
|
def retrieve_command_results_of_list_commands(response: Dict[str, Any], base_header: str,
page: int, limit: int, outputs_prefix: str) -> CommandResults:
"""
Retrieves the command results of list commands.
Args:
response (Dict[str,Any]): API response from Azure.
base_header: (str) Header prefix in the readable output.
page (int): Page number.
limit (int): Page size.
outputs_prefix (str): Command context outputs prefix.
Returns:
CommandResults: List Command results.
"""
response_kusto_dataset = KustoResponseDataSetV1(response)
total_rows = response_kusto_dataset.primary_results[0].rows_count
total_pages = total_rows // limit + (total_rows % limit != 0)
outputs = convert_kusto_response_to_dict(response_kusto_dataset, page, limit)
readable_header = format_header_for_list_commands(base_header,
total_rows, total_pages, page, limit)
readable_output = tableToMarkdown(readable_header,
outputs,
headers=['ClientActivityId', 'User', 'Text',
'Database', 'StartedOn',
'LastUpdatedOn',
'State'],
headerTransform=pascalToSpace)
command_results = CommandResults(
outputs_prefix=outputs_prefix,
outputs_key_field='ClientActivityId',
outputs=outputs,
raw_response=response,
readable_output=readable_output
)
return command_results
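The only change between the two versions above is the page count: ``total_rows // limit + 1`` over-counts whenever ``total_rows`` is an exact multiple of ``limit``, while adding ``(total_rows % limit != 0)`` yields a true ceiling. A quick self-contained check with illustrative numbers (not from the source):

def pages_plus_one(total_rows, limit):
    return total_rows // limit + 1

def pages_ceiling(total_rows, limit):
    return total_rows // limit + (total_rows % limit != 0)

assert pages_plus_one(100, 50) == 3   # reports an empty third page
assert pages_ceiling(100, 50) == 2    # exact multiple: no extra page
assert pages_ceiling(101, 50) == 3    # a partial last page is still counted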
|
57,805 |
def main():
"""
PARSE AND VALIDATE FEED PARAMS
"""
params = demisto.params()
args = demisto.args()
api_key = params.get('api_key')
verify = not params.get('insecure', False)
feed_name = params.get('feed_name')
feed_tags = argToList(params.get('feedTags'))
tlp_color = params.get('tlp_color')
first_fetch_date = params.get('first_fetch_date', 'today')
if not first_fetch_date:
first_fetch_date = 'today'
first_fetch_date = arg_to_datetime(arg=first_fetch_date, arg_name='First fetch date')
command = demisto.command()
demisto.debug(f'Command being called is: {command}')
try:
client = Client(api_key, verify)
if command == 'test-module':
return_results(test_module(client, feed_name, first_fetch_date, feed_tags, tlp_color))
elif command == 'fetch-indicators':
indicators = fetch_indicators(client, feed_name, first_fetch_date, feed_tags, tlp_color)
for iter_ in batch(indicators, batch_size=2000):
demisto.createIndicators(iter_)
elif command == 'CiscoSMA-get-indicators':
return_results(get_indicators_command(client, args))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as err:
return_error(str(err))
|
def main():
"""
PARSE AND VALIDATE FEED PARAMS
"""
params = demisto.params()
args = demisto.args()
api_key = params.get('api_key')
verify = not params.get('insecure', False)
feed_name = params.get('feed_name')
feed_tags = argToList(params.get('feedTags'))
tlp_color = params.get('tlp_color')
first_fetch_date = params.get('first_fetch_date', 'today')
if not first_fetch_date:
first_fetch_date = 'today'
first_fetch_date = arg_to_datetime(arg=first_fetch_date, arg_name='First fetch date')
command = demisto.command()
demisto.debug(f'Command being called is: {command}')
try:
client = Client(api_key, verify)
if command == 'test-module':
return_results(test_module(client, feed_name, first_fetch_date, feed_tags, tlp_color))
elif command == 'fetch-indicators':
indicators = fetch_indicators(client, feed_name, first_fetch_date, feed_tags, tlp_color)
for iter_ in batch(indicators, batch_size=2000):
demisto.createIndicators(iter_)
elif command == 'CiscoSMA-get-indicators':
return_results(get_indicators_command(client, args))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as err:
demisto.error(traceback.format_exc())
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(err)}')
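Both versions push indicators through ``batch(indicators, batch_size=2000)`` before calling ``demisto.createIndicators``, keeping each platform call bounded regardless of feed size. A simplified stand-in for that helper (the real ``batch`` ships with CommonServerPython; this sketch only illustrates the chunking behaviour):

def batch_sketch(iterable, batch_size=1):
    """Yield successive lists of at most batch_size items."""
    current = []
    for item in iterable:
        current.append(item)
        if len(current) == batch_size:
            yield current
            current = []
    if current:
        yield current

assert list(batch_sketch(range(5), batch_size=2)) == [[0, 1], [2, 3], [4]]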
|
31,051 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# Parse parameters
api_key = demisto.params().get('apikey')
viper_project = demisto.params().get('viper_project')
base_url = urljoin(demisto.params()['url'], f'/api/v3/project/{viper_project}')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Token {api_key}',
'Accept': 'application/json'
}
client = ViperClient(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'viper-download':
viper_download(client, demisto.args())
elif demisto.command() == 'viper-search':
viper_search(client, demisto.args())
# Log exceptions
except Exception as e:
# demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# Parse parameters
api_key = demisto.params().get('apikey')
viper_project = demisto.params().get('viper_project')
base_url = urljoin(demisto.params()['url'], f'/api/v3/project/{viper_project}')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Token {api_key}',
'Accept': 'application/json'
}
client = ViperClient(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'viper-download':
viper_download(client, demisto.args())
elif demisto.command() == 'viper-search':
return_results(viper_search(client, demisto.args()))
# Log exceptions
except Exception as e:
# demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
31,005 |
def get_modified_files_for_testing(
git_diff: str,
) -> Tuple[List[str], List[str], List[str], bool, List[str], set, bool, bool]:
"""
Gets git diff string and filters those files into tests:
Args:
git_diff: a git diff output (with --name-only flag)
Returns:
modified_files: Modified YMLs for testing (Integrations, Scripts, Playbooks).
modified_tests: Test playbooks.
changed_common: Globally used YMLs (Like CommonServerPython).
is_conf_json: If Tests/Conf.json has been changed.
sample_tests: Files to test, Like the infrastructures files.
modified_metadata: Metadata files.
is_reputations_json: If any reputation file changed.
is_indicator_json: If any indicator file changed.
"""
sample_tests: Set[str] = set()
modified_metadata: Set[str] = set()
modified_files: Set[str] = set()
types_to_files: Dict[FileType, Set[str]] = create_type_to_file(git_diff)
    # Check whether any common file is present among the changed files
changed_common = get_common_files(
types_to_files.get(FileType.INTEGRATION, set()),
types_to_files.get(FileType.SCRIPT, set())
)
# Remove common files from the sets
for file_path in changed_common:
file_type = tools.find_type(file_path)
try:
types_to_files[file_type].remove(file_path)
except KeyError:
            # The change may be to a python file represented here by its yml; ignore
pass
# Sample tests are the remaining python files
sample_tests = sample_tests.union(types_to_files.get(FileType.PYTHON_FILE, set()))
# Modified files = YMLs of integrations, scripts and playbooks
modified_files = modified_files.union(
types_to_files.get(FileType.INTEGRATION, set()),
types_to_files.get(FileType.SCRIPT, set()),
types_to_files.get(FileType.PLAYBOOK, set()),
)
# Metadata packs
for file_path in types_to_files.get(FileType.METADATA, set()):
modified_metadata.add(tools.get_pack_name(file_path))
# Modified tests are test playbooks
modified_tests: Set[str] = types_to_files.get(FileType.TEST_PLAYBOOK, set())
    # Booleans: True if a file of this kind is among the changes
is_conf_json = FileType.CONF_JSON in types_to_files
is_reputations_json = FileType.REPUTATION in types_to_files
is_indicator_json = FileType.INDICATOR_FIELD in types_to_files
return (
list(modified_files),
list(modified_tests),
list(changed_common),
is_conf_json,
list(sample_tests),
modified_metadata,
is_reputations_json,
is_indicator_json,
)
|
def get_modified_files_for_testing(
git_diff: str,
) -> Tuple[List[str], List[str], List[str], bool, List[str], set, bool, bool]:
"""
Gets git diff string and filters those files into tests:
Args:
git_diff: a git diff output (with --name-only flag)
Returns:
modified_files: Modified YMLs for testing (Integrations, Scripts, Playbooks).
modified_tests: Test playbooks.
changed_common: Globally used YMLs (Like CommonServerPython).
is_conf_json: If Tests/Conf.json has been changed.
sample_tests: Files to test, Like the infrastructures files.
modified_metadata: Metadata files.
is_reputations_json: If any reputation file changed.
is_indicator_json: If any indicator file changed.
"""
sample_tests: Set[str] = set()
modified_metadata: Set[str] = set()
modified_script_integration_playbook_files: Set[str] = set()
types_to_files: Dict[FileType, Set[str]] = create_type_to_file(git_diff)
    # Check whether any common file is present among the changed files
changed_common = get_common_files(
types_to_files.get(FileType.INTEGRATION, set()),
types_to_files.get(FileType.SCRIPT, set())
)
# Remove common files from the sets
for file_path in changed_common:
file_type = tools.find_type(file_path)
try:
types_to_files[file_type].remove(file_path)
except KeyError:
            # The change may be to a python file represented here by its yml; ignore
pass
# Sample tests are the remaining python files
sample_tests = sample_tests.union(types_to_files.get(FileType.PYTHON_FILE, set()))
# Modified files = YMLs of integrations, scripts and playbooks
    modified_script_integration_playbook_files = modified_script_integration_playbook_files.union(
types_to_files.get(FileType.INTEGRATION, set()),
types_to_files.get(FileType.SCRIPT, set()),
types_to_files.get(FileType.PLAYBOOK, set()),
)
# Metadata packs
for file_path in types_to_files.get(FileType.METADATA, set()):
modified_metadata.add(tools.get_pack_name(file_path))
# Modified tests are test playbooks
modified_tests: Set[str] = types_to_files.get(FileType.TEST_PLAYBOOK, set())
    # Booleans: True if a file of this kind is among the changes
is_conf_json = FileType.CONF_JSON in types_to_files
is_reputations_json = FileType.REPUTATION in types_to_files
is_indicator_json = FileType.INDICATOR_FIELD in types_to_files
return (
        list(modified_script_integration_playbook_files),
list(modified_tests),
list(changed_common),
is_conf_json,
list(sample_tests),
modified_metadata,
is_reputations_json,
is_indicator_json,
)
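The filtering above assumes ``create_type_to_file`` returns a mapping from file type to the set of changed paths of that type; common files are then removed from their buckets before the integration, script and playbook YMLs are unioned. A simplified, self-contained stand-in for that bucketing (the enum and paths below are illustrative, not the real demisto-sdk ``FileType``):

from enum import Enum, auto

class ToyFileType(Enum):
    INTEGRATION = auto()
    SCRIPT = auto()
    PLAYBOOK = auto()

types_to_files = {
    ToyFileType.INTEGRATION: {"Packs/A/Integrations/A/A.yml"},
    ToyFileType.SCRIPT: {"Packs/Base/Scripts/CommonServerPython/CommonServerPython.yml"},
    ToyFileType.PLAYBOOK: {"Packs/A/Playbooks/playbook-A.yml"},
}
changed_common = {"Packs/Base/Scripts/CommonServerPython/CommonServerPython.yml"}

# Strip common files out of their buckets, then union the remaining YMLs.
for path in changed_common:
    for bucket in types_to_files.values():
        bucket.discard(path)

modified = set().union(*types_to_files.values())
assert modified == {"Packs/A/Integrations/A/A.yml", "Packs/A/Playbooks/playbook-A.yml"}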
|
49,599 |
def to_zarr(
arr,
url,
component=None,
storage_options=None,
overwrite=False,
compute=True,
return_stored=False,
**kwargs,
):
"""Save array to the zarr storage format
See https://zarr.readthedocs.io for details about the format.
Parameters
----------
arr: dask.array
Data to store
url: Zarr Array or str or MutableMapping
Location of the data. A URL can include a protocol specifier like s3://
for remote data. Can also be any MutableMapping instance, which should
be serializable if used in multiple processes.
component: str or None
If the location is a zarr group rather than an array, this is the
subcomponent that should be created/over-written.
storage_options: dict
Any additional parameters for the storage backend (ignored for local
paths)
overwrite: bool
If given array already exists, overwrite=False will cause an error,
where overwrite=True will replace the existing data.
compute: bool
See :func:`~dask.array.store` for more details.
return_stored: bool
See :func:`~dask.array.store` for more details.
**kwargs:
Passed to the :func:`zarr.creation.create` function, e.g., compression options.
Raises
------
ValueError
If ``arr`` has unknown chunk sizes, which is not supported by Zarr.
See Also
--------
dask.array.store
dask.array.Array.compute_chunk_sizes
"""
import zarr
if np.isnan(arr.shape).any():
raise ValueError(
"Saving a dask array with unknown chunk sizes is not "
"currently supported by Zarr.%s" % unknown_chunk_message
)
if isinstance(url, zarr.Array):
z = url
if (
isinstance(z.store, (dict, MutableMapping))
and not callable(config.get("scheduler", ""))
and "distributed" in config.get("scheduler", "")
):
raise RuntimeError(
"Cannot store into in memory Zarr Array using "
"the Distributed Scheduler."
)
arr = arr.rechunk(z.chunks)
return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
if not _check_regular_chunks(arr.chunks):
raise ValueError(
"Attempt to save array to zarr with irregular "
"chunking, please call `arr.rechunk(...)` first."
)
storage_options = storage_options or {}
if isinstance(url, str):
mapper = get_mapper(url, **storage_options)
else:
# assume the object passed is already a mapper
mapper = url
chunks = [c[0] for c in arr.chunks]
z = zarr.create(
shape=arr.shape,
chunks=chunks,
dtype=arr.dtype,
store=mapper,
path=component,
overwrite=overwrite,
**kwargs,
)
return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
|
def to_zarr(
arr,
url,
component=None,
storage_options=None,
overwrite=False,
compute=True,
return_stored=False,
**kwargs,
):
"""Save array to the zarr storage format
See https://zarr.readthedocs.io for details about the format.
Parameters
----------
arr: dask.array
Data to store
url: Zarr Array or str or MutableMapping
Location of the data. A URL can include a protocol specifier like s3://
for remote data. Can also be any MutableMapping instance, which should
be serializable if used in multiple processes.
component: str or None
If the location is a zarr group rather than an array, this is the
subcomponent that should be created/over-written.
storage_options: dict
Any additional parameters for the storage backend (ignored for local
paths)
overwrite: bool
If given array already exists, overwrite=False will cause an error,
where overwrite=True will replace the existing data.
compute: bool
See :func:`~dask.array.store` for more details.
return_stored: bool
See :func:`~dask.array.store` for more details.
**kwargs:
Passed to the :func:`zarr.creation.create` function, e.g., compression options.
Raises
------
ValueError
If ``arr`` has unknown chunk sizes, which is not supported by Zarr.
See Also
--------
dask.array.store
dask.array.Array.compute_chunk_sizes
"""
import zarr
if np.isnan(arr.shape).any():
raise ValueError(
"Saving a dask array with unknown chunk sizes is not "
"currently supported by Zarr.%s" % unknown_chunk_message
)
if isinstance(url, zarr.Array):
z = url
if (
isinstance(z.store, (dict, MutableMapping))
and isinstance(config.get("scheduler", None), str)
and "distributed" in config.get("scheduler", "")
):
raise RuntimeError(
"Cannot store into in memory Zarr Array using "
"the Distributed Scheduler."
)
arr = arr.rechunk(z.chunks)
return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
if not _check_regular_chunks(arr.chunks):
raise ValueError(
"Attempt to save array to zarr with irregular "
"chunking, please call `arr.rechunk(...)` first."
)
storage_options = storage_options or {}
if isinstance(url, str):
mapper = get_mapper(url, **storage_options)
else:
# assume the object passed is already a mapper
mapper = url
chunks = [c[0] for c in arr.chunks]
z = zarr.create(
shape=arr.shape,
chunks=chunks,
dtype=arr.dtype,
store=mapper,
path=component,
overwrite=overwrite,
**kwargs,
)
return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
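The modified version above only changes the in-memory-store guard, requiring the configured scheduler to be a string containing ``"distributed"`` rather than any non-callable value. A minimal usage sketch of the function itself, assuming ``dask`` and ``zarr`` are installed (the store path and component name are hypothetical):

import dask.array as da

arr = da.ones((1000, 1000), chunks=(100, 100))
da.to_zarr(arr, "example.zarr", component="ones", overwrite=True)  # computes and writes eagerly

roundtrip = da.from_zarr("example.zarr", component="ones")
assert roundtrip.shape == (1000, 1000)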
|
30,646 |
def fetch_incidents(client, last_run):
""" Callback to fetch incidents periodically """
last_fetch_time = last_run.get('last_fetch', datetime.utcnow().timestamp() - 60)
site, concentrator, map = get_site_params()
params = demisto.params()
tags = params.get('tags') or None
event_types = params.get('event_types')
zone_events = []
if 'all' in event_types or 'zone_event' in event_types:
zone_events = client.get_zone_events(concentrator=concentrator, map=map,
site=site, tags=tags, since=last_fetch_time)
device_events = []
if 'all' in event_types or 'device_event' in event_types:
device_events = client.get_device_events(concentrator=concentrator, map=map,
site=site, tags=tags,
since=last_fetch_time)
events = zone_events + device_events
incidents = []
for event in events:
event_time = int(event['time_s'])
incident = {
'name': event['event_type'],
'occurred': datetime.utcfromtimestamp(event_time).strftime(
DATE_FORMAT),
'rawJSON': json.dumps(event),
}
incidents.append(incident)
if event_time > last_fetch_time:
last_fetch_time = event_time
next_run = {'last_fetch': last_fetch_time}
return next_run, incidents
|
def fetch_incidents(client, last_run):
""" Callback to fetch incidents periodically """
last_fetch_time = last_run.get('last_fetch', datetime.utcnow().timestamp() - NEW_PARAM)
site, concentrator, map = get_site_params()
params = demisto.params()
tags = params.get('tags') or None
event_types = params.get('event_types')
zone_events = []
if 'all' in event_types or 'zone_event' in event_types:
zone_events = client.get_zone_events(concentrator=concentrator, map=map,
site=site, tags=tags, since=last_fetch_time)
device_events = []
if 'all' in event_types or 'device_event' in event_types:
device_events = client.get_device_events(concentrator=concentrator, map=map,
site=site, tags=tags,
since=last_fetch_time)
events = zone_events + device_events
incidents = []
for event in events:
event_time = int(event['time_s'])
incident = {
'name': event['event_type'],
'occurred': datetime.utcfromtimestamp(event_time).strftime(
DATE_FORMAT),
'rawJSON': json.dumps(event),
}
incidents.append(incident)
if event_time > last_fetch_time:
last_fetch_time = event_time
next_run = {'last_fetch': last_fetch_time}
return next_run, incidents
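Both variants return ``(next_run, incidents)`` so the caller can persist the fetch checkpoint between polls. A hedged, self-contained sketch of how that wiring usually looks (``demistomock`` is the local test shim for the ``demisto`` API; the stub below stands in for the real ``fetch_incidents`` and client):

import json
import demistomock as demisto  # assumption: local testing via the demistomock shim

def fetch_stub(client, last_run):
    # Stand-in for the real fetch_incidents above.
    return {'last_fetch': 1700000000}, [{'name': 'example', 'rawJSON': json.dumps({})}]

if demisto.command() == 'fetch-incidents':
    next_run, incidents = fetch_stub(client=None, last_run=demisto.getLastRun())
    demisto.setLastRun(next_run)   # persist the checkpoint for the next poll
    demisto.incidents(incidents)   # hand the new incidents to the platform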
|
911 |
def diophantine(eq, param=symbols("t", integer=True), syms=None,
permute=False):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
Explanation
===========
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
independently and combined. Each term is solved by calling
``diop_solve()``. (Although it is possible to call ``diop_solve()``
directly, one must be careful to pass an equation in the correct
form and to interpret the output correctly; ``diophantine()`` is
the public-facing function to use in general.)
Output of ``diophantine()`` is a set of tuples. The elements of the
tuple are the solutions for each variable in the equation and
are arranged according to the alphabetic ordering of the variables.
e.g. For an equation with two variables, `a` and `b`, the first
element of the tuple is the solution for `a` and the second for `b`.
Usage
=====
``diophantine(eq, t, syms)``: Solve the diophantine
equation ``eq``.
``t`` is the optional parameter to be used by ``diop_solve()``.
``syms`` is an optional list of symbols which determines the
order of the elements in the returned tuple.
By default, only the base solution is returned. If ``permute`` is set to
True then permutations of the base solution and/or permutations of the
signs of the values will be returned when applicable.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import a, b
>>> eq = a**4 + b**4 - (2**4 + 3**4)
>>> diophantine(eq)
{(2, 3)}
>>> diophantine(eq, permute=True)
{(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
{(t_0, -t_0), (t_0, t_0)}
>>> diophantine(x*(2*x + 3*y - z))
{(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
>>> diophantine(x**2 + 3*x*y + 4*x)
{(0, n1), (3*t_0 - 4, -t_0)}
See Also
========
diop_solve()
sympy.utilities.iterables.permute_signs
sympy.utilities.iterables.signed_permutations
"""
from sympy.utilities.iterables import (
subsets, permute_signs, signed_permutations)
eq = _sympify(eq)
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
if syms:
if not is_sequence(syms):
raise TypeError(
'syms should be given as a sequence, e.g. a list')
syms = [i for i in syms if i in var]
if syms != var:
dict_sym_index = dict(zip(syms, range(len(syms))))
return {tuple([t[dict_sym_index[i]] for i in var])
for t in diophantine(eq, param, permute=True)}
n, d = eq.as_numer_denom()
if n.is_number:
return set()
if not d.is_number:
dsol = diophantine(d)
good = diophantine(n) - dsol
return {s for s in good if _mexpand(d.subs(zip(var, s)))}
else:
eq = n
eq = factor_terms(eq)
assert not eq.is_number
eq = eq.as_independent(*var, as_Add=False)[1]
# check for plausible integer non-solutions
ivar = {s: Dummy(integer=True) for s in var if not s.is_integer}
if eq.xreplace(ivar).is_integer is False:
return set()
nzvar = {s: Dummy(integer=True, zero=False) for s in var if s.is_zero is None}
if eq.xreplace(nzvar).is_integer is False:
# perhaps zero if 1 or more of the symbols is 0
rv = []
for i in subsets(nzvar):
zvar = {s: 0 for s in i}
if eq.xreplace(zvar).is_integer:
if len(i) == len(nzvar) and rv:
continue
rv.append(tuple(
[0 if i in zvar else i for i in var]))
return set(rv)
try:
p = Poly(eq)
assert not any(g.is_number for g in p.gens)
eq = p.as_expr()
assert eq.is_polynomial()
except (GeneratorsNeeded, AssertionError):
raise TypeError(filldedent('''
Equation should be a polynomial with Rational coefficients.'''))
# permute only sign
do_permute_signs = False
# permute sign and values
do_permute_signs_var = False
# permute few signs
permute_few_signs = False
try:
# if we know that factoring should not be attempted, skip
# the factoring step
v, c, t = classify_diop(eq)
# check for permute sign
if permute:
len_var = len(v)
permute_signs_for = [
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name]
permute_signs_check = [
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
BinaryQuadratic.name]
if t in permute_signs_for:
do_permute_signs_var = True
elif t in permute_signs_check:
# if all the variables in eq have even powers
# then do_permute_sign = True
if len_var == 3:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y), (x, z), (y, z)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda a: a[0]*a[1], var_mul)
# if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then
# `xy_coeff` => True and do_permute_sign => False.
# Means no permuted solution.
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2, z**2, const is present
do_permute_signs = True
elif not x_coeff:
permute_few_signs = True
elif len_var == 2:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda x: x[0]*x[1], var_mul)
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2 and const is present
# so we can get more soln by permuting this soln.
do_permute_signs = True
elif not x_coeff:
# when coeff(x), coeff(y) is not present then signs of
# x, y can be permuted such that their sign are same
# as sign of x*y.
# e.g 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val)
                        # 2. (-x_val, y_val)=> (-x_val,y_val), (x_val,-y_val)
permute_few_signs = True
if t == 'general_sum_of_squares':
# trying to factor such expressions will sometimes hang
terms = [(eq, 1)]
else:
raise TypeError
except (TypeError, NotImplementedError):
fl = factor_list(eq)
if fl[0].is_Rational and fl[0] != 1:
return diophantine(eq/fl[0], param=param, syms=syms, permute=permute)
terms = fl[1]
sols = set()
for term in terms:
base, _ = term
var_t, _, eq_type = classify_diop(base, _dict=False)
_, base = signsimp(base, evaluate=False).as_coeff_Mul()
solution = diop_solve(base, param)
if eq_type in [
Linear.name,
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
GeneralPythagorean.name]:
sols.add(merge_solution(var, var_t, solution))
elif eq_type in [
BinaryQuadratic.name,
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name,
Univariate.name]:
for sol in solution:
sols.add(merge_solution(var, var_t, sol))
else:
raise NotImplementedError('unhandled type: %s' % eq_type)
# remove null merge results
if () in sols:
sols.remove(())
null = tuple([0]*len(var))
# if there is no solution, return trivial solution
if not sols and eq.subs(zip(var, null)).is_zero:
sols.add(null)
final_soln = set()
for sol in sols:
if all(_is_int(s) for s in sol):
if do_permute_signs:
permuted_sign = set(permute_signs(sol))
final_soln.update(permuted_sign)
elif permute_few_signs:
lst = list(permute_signs(sol))
lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst))
permuted_sign = set(lst)
final_soln.update(permuted_sign)
elif do_permute_signs_var:
permuted_sign_var = set(signed_permutations(sol))
final_soln.update(permuted_sign_var)
else:
final_soln.add(sol)
else:
final_soln.add(sol)
return final_soln
|
def diophantine(eq, param=symbols("t", integer=True), syms=None,
permute=False):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
Explanation
===========
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
independently and combined. Each term is solved by calling
``diop_solve()``. (Although it is possible to call ``diop_solve()``
directly, one must be careful to pass an equation in the correct
form and to interpret the output correctly; ``diophantine()`` is
the public-facing function to use in general.)
Output of ``diophantine()`` is a set of tuples. The elements of the
tuple are the solutions for each variable in the equation and
are arranged according to the alphabetic ordering of the variables.
e.g. For an equation with two variables, `a` and `b`, the first
element of the tuple is the solution for `a` and the second for `b`.
Usage
=====
``diophantine(eq, t, syms)``: Solve the diophantine
equation ``eq``.
``t`` is the optional parameter to be used by ``diop_solve()``.
``syms`` is an optional list of symbols which determines the
order of the elements in the returned tuple.
By default, only the base solution is returned. If ``permute`` is set to
True then permutations of the base solution and/or permutations of the
signs of the values will be returned when applicable.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import a, b
>>> eq = a**4 + b**4 - (2**4 + 3**4)
>>> diophantine(eq)
{(2, 3)}
>>> diophantine(eq, permute=True)
{(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
{(t_0, -t_0), (t_0, t_0)}
>>> diophantine(x*(2*x + 3*y - z))
{(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
>>> diophantine(x**2 + 3*x*y + 4*x)
{(0, n1), (3*t_0 - 4, -t_0)}
See Also
========
diop_solve()
sympy.utilities.iterables.permute_signs
sympy.utilities.iterables.signed_permutations
"""
from sympy.utilities.iterables import (
subsets, permute_signs, signed_permutations)
eq = _sympify(eq)
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
if syms:
if not is_sequence(syms):
raise TypeError(
'syms should be given as a sequence, e.g. a list')
syms = [i for i in syms if i in var]
if syms != var:
dict_sym_index = dict(zip(syms, range(len(syms))))
return {tuple([t[dict_sym_index[i]] for i in var])
for t in diophantine(eq, param, permute=True)}
n, d = eq.as_numer_denom()
if n.is_number:
return set()
if not d.is_number:
dsol = diophantine(d)
good = diophantine(n) - dsol
return {s for s in good if _mexpand(d.subs(zip(var, s)))}
else:
eq = n
eq = factor_terms(eq)
assert not eq.is_number
eq = eq.as_independent(*var, as_Add=False)[1]
# check for plausible integer non-solutions
ivar = {s: Dummy(integer=True) for s in var if not s.is_integer}
if eq.xreplace(ivar).is_integer is False:
return set()
nzvar = {s: Dummy(integer=True, zero=False) for s in var if s.is_zero is None}
if eq.xreplace(nzvar).is_integer is False:
# perhaps zero if 1 or more of the symbols is 0
rv = []
for i in subsets(nzvar):
zvar = {s: 0 for s in i}
if eq.xreplace(zvar).is_integer:
if len(i) == len(nzvar) and rv:
continue
rv.append(tuple(
[S.Zero if i in zvar else i for i in var]))
return set(rv)
try:
p = Poly(eq)
assert not any(g.is_number for g in p.gens)
eq = p.as_expr()
assert eq.is_polynomial()
except (GeneratorsNeeded, AssertionError):
raise TypeError(filldedent('''
Equation should be a polynomial with Rational coefficients.'''))
# permute only sign
do_permute_signs = False
# permute sign and values
do_permute_signs_var = False
# permute few signs
permute_few_signs = False
try:
# if we know that factoring should not be attempted, skip
# the factoring step
v, c, t = classify_diop(eq)
# check for permute sign
if permute:
len_var = len(v)
permute_signs_for = [
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name]
permute_signs_check = [
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
BinaryQuadratic.name]
if t in permute_signs_for:
do_permute_signs_var = True
elif t in permute_signs_check:
# if all the variables in eq have even powers
# then do_permute_sign = True
if len_var == 3:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y), (x, z), (y, z)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda a: a[0]*a[1], var_mul)
# if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then
# `xy_coeff` => True and do_permute_sign => False.
# Means no permuted solution.
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2, z**2, const is present
do_permute_signs = True
elif not x_coeff:
permute_few_signs = True
elif len_var == 2:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda x: x[0]*x[1], var_mul)
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2 and const is present
# so we can get more soln by permuting this soln.
do_permute_signs = True
elif not x_coeff:
# when coeff(x), coeff(y) is not present then signs of
# x, y can be permuted such that their sign are same
# as sign of x*y.
# e.g 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val)
                        # 2. (-x_val, y_val)=> (-x_val,y_val), (x_val,-y_val)
permute_few_signs = True
if t == 'general_sum_of_squares':
# trying to factor such expressions will sometimes hang
terms = [(eq, 1)]
else:
raise TypeError
except (TypeError, NotImplementedError):
fl = factor_list(eq)
if fl[0].is_Rational and fl[0] != 1:
return diophantine(eq/fl[0], param=param, syms=syms, permute=permute)
terms = fl[1]
sols = set()
for term in terms:
base, _ = term
var_t, _, eq_type = classify_diop(base, _dict=False)
_, base = signsimp(base, evaluate=False).as_coeff_Mul()
solution = diop_solve(base, param)
if eq_type in [
Linear.name,
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
GeneralPythagorean.name]:
sols.add(merge_solution(var, var_t, solution))
elif eq_type in [
BinaryQuadratic.name,
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name,
Univariate.name]:
for sol in solution:
sols.add(merge_solution(var, var_t, sol))
else:
raise NotImplementedError('unhandled type: %s' % eq_type)
# remove null merge results
if () in sols:
sols.remove(())
null = tuple([0]*len(var))
# if there is no solution, return trivial solution
if not sols and eq.subs(zip(var, null)).is_zero:
sols.add(null)
final_soln = set()
for sol in sols:
if all(_is_int(s) for s in sol):
if do_permute_signs:
permuted_sign = set(permute_signs(sol))
final_soln.update(permuted_sign)
elif permute_few_signs:
lst = list(permute_signs(sol))
lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst))
permuted_sign = set(lst)
final_soln.update(permuted_sign)
elif do_permute_signs_var:
permuted_sign_var = set(signed_permutations(sol))
final_soln.update(permuted_sign_var)
else:
final_soln.add(sol)
else:
final_soln.add(sol)
return final_soln
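The only behavioural change in the version above is returning ``S.Zero`` instead of the plain int ``0`` when a symbol is forced to zero, so every element of a solution tuple stays a SymPy object. A small illustration:

from sympy import S, Integer

assert S.Zero == 0                   # numerically identical to the plain int
assert isinstance(S.Zero, Integer)   # but a SymPy Integer, not a builtin int
assert S.Zero.is_integer is True     # and it carries SymPy's assumption interface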
|
35,245 |
def get_compiler_base_options(compiler_path) -> List[str]:
"""Returns base options for nvcc compiler.
"""
global _compiler_base_options
if _compiler_base_options is None:
_compiler_base_options = _get_compiler_base_options(compiler_path)
return _compiler_base_options
|
def get_compiler_base_options(compiler_path: str) -> List[str]:
"""Returns base options for nvcc compiler.
"""
global _compiler_base_options
if _compiler_base_options is None:
_compiler_base_options = _get_compiler_base_options(compiler_path)
return _compiler_base_options
|
25,965 |
def load_arguments(self, _):
# PARAMETER REGISTRATION
fields_arg_type = CLIArgumentType(
nargs='+',
help='Space-separated customized output fields.',
validator=validate_query_fields,
arg_type=get_enum_type(['key', 'value', 'label', 'content_type', 'etag', 'tags', 'locked', 'last_modified'])
)
feature_fields_arg_type = CLIArgumentType(
nargs='+',
help='Customize output fields for Feature Flags.',
validator=validate_feature_query_fields,
arg_type=get_enum_type(['key', 'label', 'locked', 'last_modified', 'state', 'description', 'conditions'])
)
filter_parameters_arg_type = CLIArgumentType(
validator=validate_filter_parameters,
help="Space-separated filter parameters in 'name[=value]' format. The value must be an escaped JSON string.",
nargs='*'
)
datatime_filter_arg_type = CLIArgumentType(
validator=validate_datetime,
help='Format: "YYYY-MM-DDThh:mm:ssZ". If no time zone specified, use UTC by default.'
)
top_arg_type = CLIArgumentType(
options_list=['--top', '-t'],
type=int,
help='Maximum number of items to return. Must be a positive integer. Default to 100.'
)
identities_arg_type = CLIArgumentType(
nargs='*',
validator=validate_identity
)
with self.argument_context('appconfig') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('name', options_list=['--name', '-n'], id_part='None', help='Name of the App Configuration. You can configure the default name using `az configure --defaults app_configuration_store=<name>`', configured_default='app_configuration_store')
c.argument('connection_string', validator=validate_connection_string, configured_default='appconfig_connection_string',
help="Combination of access key and endpoint of App Configuration. Can be found using 'az appconfig credential list'. Users can preset it using `az configure --defaults appconfig_connection_string=<connection_string>` or environment variable with the name AZURE_APPCONFIG_CONNECTION_STRING.")
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.')
c.argument('datetime', arg_type=datatime_filter_arg_type)
c.argument('top', arg_type=top_arg_type)
c.argument('all_', options_list=['--all'], action='store_true', help="List all items.")
c.argument('fields', arg_type=fields_arg_type)
c.argument('sku', help='The sku of App Configuration', arg_type=get_enum_type(['Free', 'Standard']))
c.argument('endpoint', help='If auth mode is "login", provide endpoint URL of the App Configuration. The endpoint can be retrieved using "az appconfig show" command. You can configure the default endpoint using `az configure --defaults appconfig_endpoint=<endpoint>`', configured_default='appconfig_endpoint')
c.argument('auth_mode', arg_type=get_enum_type(['login', 'key']), configured_default='appconfig_auth_mode', validator=validate_auth_mode,
help='This parameter can be used for indicating how a data operation is to be authorized. ' +
'If the auth mode is "key", provide connection string or store name and your account access keys will be retrieved for authorization. ' +
'If the auth mode is "login", provide the store endpoint or store name and your "az login" credentials will be used for authorization. ' +
'You can configure the default auth mode using `az configure --defaults appconfig_auth_mode=<auth_mode>`. ' +
'For more information, see https://docs.microsoft.com/en-us/azure/azure-app-configuration/concept-enable-rbac')
with self.argument_context('appconfig create') as c:
c.argument('location', options_list=['--location', '-l'], arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('assign_identity', arg_type=identities_arg_type,
help='Space-separated list of managed identities to be assigned. Use "[system]" to refer to system-assigned managed identity or a resource ID to refer to user-assigned managed identity. If this argument is provided without any value, system-assigned managed identity will be assigned by default. If this argument is not provided, no managed identities will be assigned to this App Configuration store.')
c.argument('enable_public_network', options_list=['--enable-public-network', '-e'], arg_type=get_three_state_flag(), is_preview=True,
help='When true, requests coming from public networks have permission to access this store while private endpoint is enabled. When false, only requests made through Private Links can reach this store.')
c.argument('disable_local_auth', arg_type=get_three_state_flag(), is_preview=True, help='Disables all authentication methods other than AAD authentication.')
with self.argument_context('appconfig update') as c:
c.argument('tags', arg_type=tags_type)
c.argument('enable_public_network', options_list=['--enable-public-network', '-e'], arg_type=get_three_state_flag(), is_preview=True,
help='When true, requests coming from public networks have permission to access this store while private endpoint is enabled. When false, only requests made through Private Links can reach this store.')
c.argument('disable_local_auth', arg_type=get_three_state_flag(), is_preview=True, help='Disables all authentication methods other than AAD authentication.')
with self.argument_context('appconfig update', arg_group='Customer Managed Key') as c:
c.argument('encryption_key_name', help='The name of the KeyVault key.')
c.argument('encryption_key_vault', help='The URI of the KeyVault.')
c.argument('encryption_key_version', help='The version of the KeyVault key. Use the latest version by default.')
c.argument('identity_client_id', help='Client ID of the managed identity with wrap and unwrap access to encryption key. Use system-assigned managed identity by default.')
with self.argument_context('appconfig identity assign') as c:
c.argument('identities', arg_type=identities_arg_type, help="Accept system-assigned or user-assigned managed identities separated by spaces. Use '[system]' to refer to system-assigned managed identity or a resource ID to refer to user-assigned managed identity. If this argument is not provided or this argument is provided without any value, system-assigned managed identity will be used by default.")
with self.argument_context('appconfig identity remove') as c:
c.argument('identities', arg_type=identities_arg_type, help="Accept system-assigned or user-assigned managed identities separated by spaces. Use '[system]' to refer to system-assigned managed identity, '[all]' for all managed identities or a resource ID to refer user-assigned managed identity. If this argument is not provided or this argument is provided without any value, system-assigned managed identity will be removed by default.")
with self.argument_context('appconfig credential regenerate') as c:
c.argument('id_', options_list=['--id'], help='Id of the key to be regenerated. Can be found using az appconfig credential list command.')
with self.argument_context('appconfig kv import') as c:
c.argument('label', help="Imported KVs and feature flags will be assigned with this label. If no label specified, will assign null label.")
c.argument('prefix', help="This prefix will be appended to the front of imported keys. Prefix will be ignored for feature flags.")
c.argument('source', options_list=['--source', '-s'], arg_type=get_enum_type(['file', 'appconfig', 'appservice']), validator=validate_import, help="The source of importing. Note that importing feature flags from appservice is not supported.")
c.argument('yes', help="Do not prompt for preview.")
c.argument('skip_features', help="Import only key values and exclude all feature flags. By default, all feature flags will be imported from file or appconfig. Not applicable for appservice.", arg_type=get_three_state_flag())
c.argument('content_type', help='Content type of all imported items.')
with self.argument_context('appconfig kv import', arg_group='File') as c:
c.argument('path', help='Local configuration file path. Required for file arguments.')
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['json', 'yaml', 'properties']), help='Imported file format. Required for file arguments. Currently, feature flags are not supported in properties format.')
c.argument('depth', validator=validate_import_depth, help="Depth for flattening the json or yaml file to key-value pairs. Flatten to the deepest level by default if --separator is provided. Not applicable for property files or feature flags.")
        # bypass cli allowed values limitation
c.argument('separator', validator=validate_separator, help="Delimiter for flattening the json or yaml file to key-value pairs. Separator will be ignored for property files and feature flags. Supported values: '.', ',', ';', '-', '_', '__', '/', ':' ")
with self.argument_context('appconfig kv import', arg_group='AppConfig') as c:
c.argument('src_name', help='The name of the source App Configuration.')
c.argument('src_connection_string', validator=validate_connection_string, help="Combination of access key and endpoint of the source store.")
c.argument('src_key', help='If no key specified, import all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix. Key filtering not applicable for feature flags. By default, all feature flags with specified label will be imported.')
c.argument('src_label', help="Only keys with this label in source AppConfig will be imported. If no value specified, import keys with null label by default. Support star sign as filters, for instance * means all labels, abc* means labels with abc as prefix.")
c.argument('preserve_labels', arg_type=get_three_state_flag(), help="Flag to preserve labels from source AppConfig. This argument should NOT be specified along with --label.")
c.argument('src_endpoint', help='If --src-auth-mode is "login", provide endpoint URL of the source App Configuration.')
c.argument('src_auth_mode', arg_type=get_enum_type(['login', 'key']),
help='Auth mode for connecting to source App Configuration. For details, refer to "--auth-mode" argument.')
with self.argument_context('appconfig kv import', arg_group='AppService') as c:
c.argument('appservice_account', validator=validate_appservice_name_or_id, help='ARM ID for AppService OR the name of the AppService, assuming it is in the same subscription and resource group as the App Configuration. Required for AppService arguments')
with self.argument_context('appconfig kv export') as c:
c.argument('label', help="Only keys and feature flags with this label will be exported. If no label specified, export keys and feature flags with null label by default. Only when export destination is appconfig, we support star sign as filters, for instance * means all labels and abc* means labels with abc as prefix. Label filters are not supported when exporting to file or appservice.")
c.argument('prefix', help="Prefix to be trimmed from keys. Prefix will be ignored for feature flags.")
c.argument('key', help='If no key specified, return all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix. Key filtering not applicable for feature flags. By default, all feature flags with specified label will be exported.')
c.argument('destination', options_list=['--destination', '-d'], arg_type=get_enum_type(['file', 'appconfig', 'appservice']), validator=validate_export, help="The destination of exporting. Note that exporting feature flags to appservice is not supported.")
c.argument('yes', help="Do not prompt for preview.")
c.argument('skip_features', help="Export items excluding all feature flags. By default, all features with the specified label will be exported to file or appconfig. Not applicable for appservice.", arg_type=get_three_state_flag())
c.argument('skip_keyvault', help="Export items excluding all key vault references. By default, all key vault references with the specified label will be exported.", arg_type=get_three_state_flag())
with self.argument_context('appconfig kv export', arg_group='File') as c:
c.argument('path', help='Local configuration file path. Required for file arguments.')
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['json', 'yaml', 'properties']), help='File format exporting to. Required for file arguments. Currently, feature flags are not supported in properties format.')
c.argument('depth', validator=validate_import_depth, help="Depth for flattening the key-value pairs to json or yaml file. Flatten to the deepest level by default. Not applicable for property files or feature flags.")
        # bypass cli allowed values limitation
c.argument('separator', validator=validate_separator, help="Delimiter for flattening the key-value pairs to json or yaml file. Required for exporting hierarchical structure. Separator will be ignored for property files and feature flags. Supported values: '.', ',', ';', '-', '_', '__', '/', ':' ")
c.argument('naming_convention', arg_type=get_enum_type(['pascal', 'camel', 'underscore', 'hyphen']), help='Naming convention to be used for "Feature Management" section of file. Example: pascal = FeatureManagement, camel = featureManagement, underscore = feature_management, hyphen = feature-management.')
c.argument('resolve_keyvault', arg_type=get_three_state_flag(), validator=validate_resolve_keyvault, help="Resolve the content of key vault reference.")
with self.argument_context('appconfig kv export', arg_group='AppConfig') as c:
c.argument('dest_name', help='The name of the destination App Configuration.')
c.argument('dest_connection_string', validator=validate_connection_string, help="Combination of access key and endpoint of the destination store.")
c.argument('dest_label', help="Exported KVs will be labeled with this destination label. If neither --dest-label nor --preserve-labels is specified, will assign null label.")
c.argument('preserve_labels', arg_type=get_three_state_flag(), help="Flag to preserve labels from source AppConfig. This argument should NOT be specified along with --dest-label.")
c.argument('dest_endpoint', help='If --dest-auth-mode is "login", provide endpoint URL of the destination App Configuration.')
c.argument('dest_auth_mode', arg_type=get_enum_type(['login', 'key']),
help='Auth mode for connecting to destination App Configuration. For details, refer to "--auth-mode" argument.')
with self.argument_context('appconfig kv export', arg_group='AppService') as c:
c.argument('appservice_account', validator=validate_appservice_name_or_id, help='ARM ID for AppService OR the name of the AppService, assuming it is in the same subscription and resource group as the App Configuration. Required for AppService arguments')
with self.argument_context('appconfig kv set') as c:
c.argument('key', validator=validate_key, help="Key to be set. Key cannot be a '.' or '..', or contain the '%' character.")
c.argument('label', help="If no label specified, set the key with null label by default")
c.argument('tags', arg_type=tags_type)
c.argument('content_type', help='Content type of the keyvalue to be set.')
c.argument('value', help='Value of the keyvalue to be set.')
with self.argument_context('appconfig kv set-keyvault') as c:
c.argument('key', validator=validate_key, help="Key to be set. Key cannot be a '.' or '..', or contain the '%' character.")
c.argument('label', help="If no label specified, set the key with null label by default")
c.argument('tags', arg_type=tags_type)
c.argument('secret_identifier', validator=validate_secret_identifier, help="ID of the Key Vault object. Can be found using 'az keyvault {collection} show' command, where collection is key, secret or certificate. To set reference to the latest version of your secret, remove version information from secret identifier.")
with self.argument_context('appconfig kv delete') as c:
c.argument('key', help='Support star sign as filters, for instance * means all key and abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, delete entry with null label. Support star sign as filters, for instance * means all label and abc* means labels with abc as prefix.")
with self.argument_context('appconfig kv show') as c:
c.argument('key', help='Key to be showed.')
c.argument('label', help="If no label specified, show entry with null label. Filtering is not supported.")
with self.argument_context('appconfig kv list') as c:
c.argument('key', help='If no key specified, return all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, list all labels. Support star sign as filters, for instance abc* means labels with abc as prefix. Use '\\0' for null label.")
c.argument('resolve_keyvault', arg_type=get_three_state_flag(), help="Resolve the content of key vault reference. This argument should NOT be specified along with --fields. Instead use --query for customized query.")
with self.argument_context('appconfig kv restore') as c:
c.argument('key', help='If no key specified, restore all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, restore all key-value pairs with all labels. Support star sign as filters, for instance abc* means labels with abc as prefix. Use '\\0' for null label.")
with self.argument_context('appconfig kv lock') as c:
c.argument('key', help='Key to be locked.')
c.argument('label', help="If no label specified, lock entry with null label. Filtering is not supported.")
with self.argument_context('appconfig kv unlock') as c:
c.argument('key', help='Key to be unlocked.')
c.argument('label', help="If no label specified, unlock entry with null label. Filtering is not supported.")
with self.argument_context('appconfig revision list') as c:
c.argument('key', help='If no key specified, return all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, list all labels. Support star sign as filters, for instance abc* means labels with abc as prefix. Use '\\0' for null label.")
with self.argument_context('appconfig feature show') as c:
c.argument('feature', help='Name of the feature flag to be retrieved')
c.argument('label', help="If no label specified, show entry with null label. Filtering is not supported.")
c.argument('fields', arg_type=feature_fields_arg_type)
with self.argument_context('appconfig feature set') as c:
c.argument('feature', validator=validate_feature, help="Name of the feature flag to be set. Only alphanumeric characters, '.', '-' and '_' are allowed.")
c.argument('label', help="If no label specified, set the feature flag with null label by default")
c.argument('description', help='Description of the feature flag to be set.')
with self.argument_context('appconfig feature delete') as c:
c.argument('feature', help='Key of the feature to be deleted. Support star sign as filters, for instance * means all key and abc* means keys with abc as prefix. Comma separated keys are not supported. Please provide escaped string if your feature name contains comma.')
c.argument('label', help="If no label specified, delete the feature flag with null label by default. Support star sign as filters, for instance * means all labels and abc* means labels with abc as prefix.")
with self.argument_context('appconfig feature list') as c:
c.argument('feature', help='Key of the feature to be listed. Support star sign as filters, for instance * means all key and abc* means keys with abc as prefix. Comma separated keys are not supported. Please provide escaped string if your feature name contains comma.')
c.argument('label', help="If no label specified, list all labels. Support star sign as filters, for instance * means all labels and abc* means labels with abc as prefix. Use '\\0' for null label.")
c.argument('fields', arg_type=feature_fields_arg_type)
c.argument('all_', help="List all feature flags.")
with self.argument_context('appconfig feature lock') as c:
c.argument('feature', help='Key of the feature to be locked.')
c.argument('label', help="If no label specified, lock the feature flag with null label by default.")
with self.argument_context('appconfig feature unlock') as c:
c.argument('feature', help='Key of the feature to be unlocked.')
c.argument('label', help="If no label specified, unlock the feature flag with null label by default.")
with self.argument_context('appconfig feature enable') as c:
c.argument('feature', help='Key of the feature to be enabled.')
c.argument('label', help="If no label specified, enable the feature flag with null label by default.")
with self.argument_context('appconfig feature disable') as c:
c.argument('feature', help='Key of the feature to be disabled.')
c.argument('label', help="If no label specified, disable the feature flag with null label by default.")
with self.argument_context('appconfig feature filter add') as c:
c.argument('feature', help='Name of the feature to which you want to add the filter.')
c.argument('label', help="If no label specified, add to the feature flag with null label by default.")
c.argument('filter_name', help='Name of the filter to be added.')
c.argument('filter_parameters', arg_type=filter_parameters_arg_type)
c.argument('index', type=int, help='Zero-based index in the list of filters where you want to insert the new filter. If no index is specified or index is invalid, filter will be added to the end of the list.')
with self.argument_context('appconfig feature filter delete') as c:
c.argument('feature', help='Name of the feature from which you want to delete the filter.')
c.argument('label', help="If no label specified, delete from the feature flag with null label by default.")
c.argument('filter_name', help='Name of the filter to be deleted.')
c.argument('index', type=int, help='Zero-based index of the filter to be deleted in case there are multiple instances with same filter name.')
c.argument('all_', help="Delete all filters associated with a feature flag.")
with self.argument_context('appconfig feature filter show') as c:
c.argument('feature', help='Name of the feature which contains the filter.')
c.argument('label', help="If no label specified, show the feature flag with null label by default.")
c.argument('filter_name', help='Name of the filter to be displayed.')
c.argument('index', type=int, help='Zero-based index of the filter to be displayed in case there are multiple instances with same filter name.')
with self.argument_context('appconfig feature filter list') as c:
c.argument('feature', help='Name of the feature whose filters you want to be displayed.')
c.argument('label', help="If no label specified, display filters from the feature flag with null label by default.")
c.argument('all_', help="List all filters associated with a feature flag.")
|
def load_arguments(self, _):
# PARAMETER REGISTRATION
fields_arg_type = CLIArgumentType(
nargs='+',
help='Space-separated customized output fields.',
validator=validate_query_fields,
arg_type=get_enum_type(['key', 'value', 'label', 'content_type', 'etag', 'tags', 'locked', 'last_modified'])
)
feature_fields_arg_type = CLIArgumentType(
nargs='+',
help='Customize output fields for Feature Flags.',
validator=validate_feature_query_fields,
arg_type=get_enum_type(['key', 'label', 'locked', 'last_modified', 'state', 'description', 'conditions'])
)
filter_parameters_arg_type = CLIArgumentType(
validator=validate_filter_parameters,
help="Space-separated filter parameters in 'name[=value]' format. The value must be an escaped JSON string.",
nargs='*'
)
datatime_filter_arg_type = CLIArgumentType(
validator=validate_datetime,
help='Format: "YYYY-MM-DDThh:mm:ssZ". If no time zone specified, use UTC by default.'
)
top_arg_type = CLIArgumentType(
options_list=['--top', '-t'],
type=int,
help='Maximum number of items to return. Must be a positive integer. Default to 100.'
)
identities_arg_type = CLIArgumentType(
nargs='*',
validator=validate_identity
)
with self.argument_context('appconfig') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('name', options_list=['--name', '-n'], id_part='None', help='Name of the App Configuration. You can configure the default name using `az configure --defaults app_configuration_store=<name>`', configured_default='app_configuration_store')
c.argument('connection_string', validator=validate_connection_string, configured_default='appconfig_connection_string',
help="Combination of access key and endpoint of App Configuration. Can be found using 'az appconfig credential list'. Users can preset it using `az configure --defaults appconfig_connection_string=<connection_string>` or environment variable with the name AZURE_APPCONFIG_CONNECTION_STRING.")
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.')
c.argument('datetime', arg_type=datatime_filter_arg_type)
c.argument('top', arg_type=top_arg_type)
c.argument('all_', options_list=['--all'], action='store_true', help="List all items.")
c.argument('fields', arg_type=fields_arg_type)
c.argument('sku', help='The sku of App Configuration', arg_type=get_enum_type(['Free', 'Standard']))
c.argument('endpoint', help='If auth mode is "login", provide endpoint URL of the App Configuration. The endpoint can be retrieved using "az appconfig show" command. You can configure the default endpoint using `az configure --defaults appconfig_endpoint=<endpoint>`', configured_default='appconfig_endpoint')
c.argument('auth_mode', arg_type=get_enum_type(['login', 'key']), configured_default='appconfig_auth_mode', validator=validate_auth_mode,
help='This parameter can be used for indicating how a data operation is to be authorized. ' +
'If the auth mode is "key", provide connection string or store name and your account access keys will be retrieved for authorization. ' +
'If the auth mode is "login", provide the store endpoint or store name and your "az login" credentials will be used for authorization. ' +
'You can configure the default auth mode using `az configure --defaults appconfig_auth_mode=<auth_mode>`. ' +
'For more information, see https://docs.microsoft.com/en-us/azure/azure-app-configuration/concept-enable-rbac')
with self.argument_context('appconfig create') as c:
c.argument('location', options_list=['--location', '-l'], arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('assign_identity', arg_type=identities_arg_type,
help='Space-separated list of managed identities to be assigned. Use "[system]" to refer to system-assigned managed identity or a resource ID to refer to user-assigned managed identity. If this argument is provided without any value, system-assigned managed identity will be assigned by default. If this argument is not provided, no managed identities will be assigned to this App Configuration store.')
c.argument('enable_public_network', options_list=['--enable-public-network', '-e'], arg_type=get_three_state_flag(), is_preview=True,
help='When true, requests coming from public networks have permission to access this store while private endpoint is enabled. When false, only requests made through Private Links can reach this store.')
c.argument('disable_local_auth', arg_type=get_three_state_flag(), is_preview=True, help='Disable all authentication methods other than AAD authentication.')
with self.argument_context('appconfig update') as c:
c.argument('tags', arg_type=tags_type)
c.argument('enable_public_network', options_list=['--enable-public-network', '-e'], arg_type=get_three_state_flag(), is_preview=True,
help='When true, requests coming from public networks have permission to access this store while private endpoint is enabled. When false, only requests made through Private Links can reach this store.')
c.argument('disable_local_auth', arg_type=get_three_state_flag(), is_preview=True, help='Disables all authentication methods other than AAD authentication.')
with self.argument_context('appconfig update', arg_group='Customer Managed Key') as c:
c.argument('encryption_key_name', help='The name of the KeyVault key.')
c.argument('encryption_key_vault', help='The URI of the KeyVault.')
c.argument('encryption_key_version', help='The version of the KeyVault key. Use the latest version by default.')
c.argument('identity_client_id', help='Client ID of the managed identity with wrap and unwrap access to encryption key. Use system-assigned managed identity by default.')
with self.argument_context('appconfig identity assign') as c:
c.argument('identities', arg_type=identities_arg_type, help="Accept system-assigned or user-assigned managed identities separated by spaces. Use '[system]' to refer to system-assigned managed identity or a resource ID to refer to user-assigned managed identity. If this argument is not provided or this argument is provided without any value, system-assigned managed identity will be used by default.")
with self.argument_context('appconfig identity remove') as c:
c.argument('identities', arg_type=identities_arg_type, help="Accept system-assigned or user-assigned managed identities separated by spaces. Use '[system]' to refer to system-assigned managed identity, '[all]' for all managed identities or a resource ID to refer user-assigned managed identity. If this argument is not provided or this argument is provided without any value, system-assigned managed identity will be removed by default.")
with self.argument_context('appconfig credential regenerate') as c:
c.argument('id_', options_list=['--id'], help='Id of the key to be regenerated. Can be found using az appconfig credential list command.')
with self.argument_context('appconfig kv import') as c:
c.argument('label', help="Imported KVs and feature flags will be assigned with this label. If no label specified, will assign null label.")
c.argument('prefix', help="This prefix will be appended to the front of imported keys. Prefix will be ignored for feature flags.")
c.argument('source', options_list=['--source', '-s'], arg_type=get_enum_type(['file', 'appconfig', 'appservice']), validator=validate_import, help="The source of importing. Note that importing feature flags from appservice is not supported.")
c.argument('yes', help="Do not prompt for preview.")
c.argument('skip_features', help="Import only key values and exclude all feature flags. By default, all feature flags will be imported from file or appconfig. Not applicable for appservice.", arg_type=get_three_state_flag())
c.argument('content_type', help='Content type of all imported items.')
with self.argument_context('appconfig kv import', arg_group='File') as c:
c.argument('path', help='Local configuration file path. Required for file arguments.')
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['json', 'yaml', 'properties']), help='Imported file format. Required for file arguments. Currently, feature flags are not supported in properties format.')
c.argument('depth', validator=validate_import_depth, help="Depth for flattening the json or yaml file to key-value pairs. Flatten to the deepest level by default if --separator is provided. Not applicable for property files or feature flags.")
# bypass cli allowed values limitation
c.argument('separator', validator=validate_separator, help="Delimiter for flattening the json or yaml file to key-value pairs. Separator will be ignored for property files and feature flags. Supported values: '.', ',', ';', '-', '_', '__', '/', ':' ")
with self.argument_context('appconfig kv import', arg_group='AppConfig') as c:
c.argument('src_name', help='The name of the source App Configuration.')
c.argument('src_connection_string', validator=validate_connection_string, help="Combination of access key and endpoint of the source store.")
c.argument('src_key', help='If no key specified, import all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix. Key filtering not applicable for feature flags. By default, all feature flags with specified label will be imported.')
c.argument('src_label', help="Only keys with this label in source AppConfig will be imported. If no value specified, import keys with null label by default. Support star sign as filters, for instance * means all labels, abc* means labels with abc as prefix.")
c.argument('preserve_labels', arg_type=get_three_state_flag(), help="Flag to preserve labels from source AppConfig. This argument should NOT be specified along with --label.")
c.argument('src_endpoint', help='If --src-auth-mode is "login", provide endpoint URL of the source App Configuration.')
c.argument('src_auth_mode', arg_type=get_enum_type(['login', 'key']),
help='Auth mode for connecting to source App Configuration. For details, refer to "--auth-mode" argument.')
with self.argument_context('appconfig kv import', arg_group='AppService') as c:
c.argument('appservice_account', validator=validate_appservice_name_or_id, help='ARM ID for AppService OR the name of the AppService, assuming it is in the same subscription and resource group as the App Configuration. Required for AppService arguments')
with self.argument_context('appconfig kv export') as c:
c.argument('label', help="Only keys and feature flags with this label will be exported. If no label specified, export keys and feature flags with null label by default. Only when export destination is appconfig, we support star sign as filters, for instance * means all labels and abc* means labels with abc as prefix. Label filters are not supported when exporting to file or appservice.")
c.argument('prefix', help="Prefix to be trimmed from keys. Prefix will be ignored for feature flags.")
c.argument('key', help='If no key specified, return all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix. Key filtering not applicable for feature flags. By default, all feature flags with specified label will be exported.')
c.argument('destination', options_list=['--destination', '-d'], arg_type=get_enum_type(['file', 'appconfig', 'appservice']), validator=validate_export, help="The destination of exporting. Note that exporting feature flags to appservice is not supported.")
c.argument('yes', help="Do not prompt for preview.")
c.argument('skip_features', help="Export items excluding all feature flags. By default, all features with the specified label will be exported to file or appconfig. Not applicable for appservice.", arg_type=get_three_state_flag())
c.argument('skip_keyvault', help="Export items excluding all key vault references. By default, all key vault references with the specified label will be exported.", arg_type=get_three_state_flag())
with self.argument_context('appconfig kv export', arg_group='File') as c:
c.argument('path', help='Local configuration file path. Required for file arguments.')
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['json', 'yaml', 'properties']), help='File format exporting to. Required for file arguments. Currently, feature flags are not supported in properties format.')
c.argument('depth', validator=validate_import_depth, help="Depth for flattening the key-value pairs to json or yaml file. Flatten to the deepest level by default. Not applicable for property files or feature flags.")
# bypass cli allowed values limitation
c.argument('separator', validator=validate_separator, help="Delimiter for flattening the key-value pairs to json or yaml file. Required for exporting hierarchical structure. Separator will be ignored for property files and feature flags. Supported values: '.', ',', ';', '-', '_', '__', '/', ':' ")
c.argument('naming_convention', arg_type=get_enum_type(['pascal', 'camel', 'underscore', 'hyphen']), help='Naming convention to be used for "Feature Management" section of file. Example: pascal = FeatureManagement, camel = featureManagement, underscore = feature_management, hyphen = feature-management.')
c.argument('resolve_keyvault', arg_type=get_three_state_flag(), validator=validate_resolve_keyvault, help="Resolve the content of key vault reference.")
with self.argument_context('appconfig kv export', arg_group='AppConfig') as c:
c.argument('dest_name', help='The name of the destination App Configuration.')
c.argument('dest_connection_string', validator=validate_connection_string, help="Combination of access key and endpoint of the destination store.")
c.argument('dest_label', help="Exported KVs will be labeled with this destination label. If neither --dest-label nor --preserve-labels is specified, will assign null label.")
c.argument('preserve_labels', arg_type=get_three_state_flag(), help="Flag to preserve labels from source AppConfig. This argument should NOT be specified along with --dest-label.")
c.argument('dest_endpoint', help='If --dest-auth-mode is "login", provide endpoint URL of the destination App Configuration.')
c.argument('dest_auth_mode', arg_type=get_enum_type(['login', 'key']),
help='Auth mode for connecting to destination App Configuration. For details, refer to "--auth-mode" argument.')
with self.argument_context('appconfig kv export', arg_group='AppService') as c:
c.argument('appservice_account', validator=validate_appservice_name_or_id, help='ARM ID for AppService OR the name of the AppService, assuming it is in the same subscription and resource group as the App Configuration. Required for AppService arguments')
with self.argument_context('appconfig kv set') as c:
c.argument('key', validator=validate_key, help="Key to be set. Key cannot be a '.' or '..', or contain the '%' character.")
c.argument('label', help="If no label specified, set the key with null label by default")
c.argument('tags', arg_type=tags_type)
c.argument('content_type', help='Content type of the keyvalue to be set.')
c.argument('value', help='Value of the keyvalue to be set.')
with self.argument_context('appconfig kv set-keyvault') as c:
c.argument('key', validator=validate_key, help="Key to be set. Key cannot be a '.' or '..', or contain the '%' character.")
c.argument('label', help="If no label specified, set the key with null label by default")
c.argument('tags', arg_type=tags_type)
c.argument('secret_identifier', validator=validate_secret_identifier, help="ID of the Key Vault object. Can be found using 'az keyvault {collection} show' command, where collection is key, secret or certificate. To set reference to the latest version of your secret, remove version information from secret identifier.")
with self.argument_context('appconfig kv delete') as c:
c.argument('key', help='Support star sign as filters, for instance * means all keys and abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, delete entry with null label. Support star sign as filters, for instance * means all labels and abc* means labels with abc as prefix.")
with self.argument_context('appconfig kv show') as c:
c.argument('key', help='Key to be shown.')
c.argument('label', help="If no label specified, show entry with null label. Filtering is not supported.")
with self.argument_context('appconfig kv list') as c:
c.argument('key', help='If no key specified, return all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, list all labels. Support star sign as filters, for instance abc* means labels with abc as prefix. Use '\\0' for null label.")
c.argument('resolve_keyvault', arg_type=get_three_state_flag(), help="Resolve the content of key vault reference. This argument should NOT be specified along with --fields. Instead use --query for customized query.")
with self.argument_context('appconfig kv restore') as c:
c.argument('key', help='If no key specified, restore all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, restore all key-value pairs with all labels. Support star sign as filters, for instance abc* means labels with abc as prefix. Use '\\0' for null label.")
with self.argument_context('appconfig kv lock') as c:
c.argument('key', help='Key to be locked.')
c.argument('label', help="If no label specified, lock entry with null label. Filtering is not supported.")
with self.argument_context('appconfig kv unlock') as c:
c.argument('key', help='Key to be unlocked.')
c.argument('label', help="If no label specified, unlock entry with null label. Filtering is not supported.")
with self.argument_context('appconfig revision list') as c:
c.argument('key', help='If no key specified, return all keys by default. Support star sign as filters, for instance abc* means keys with abc as prefix.')
c.argument('label', help="If no label specified, list all labels. Support star sign as filters, for instance abc* means labels with abc as prefix. Use '\\0' for null label.")
with self.argument_context('appconfig feature show') as c:
c.argument('feature', help='Name of the feature flag to be retrieved')
c.argument('label', help="If no label specified, show entry with null label. Filtering is not supported.")
c.argument('fields', arg_type=feature_fields_arg_type)
with self.argument_context('appconfig feature set') as c:
c.argument('feature', validator=validate_feature, help="Name of the feature flag to be set. Only alphanumeric characters, '.', '-' and '_' are allowed.")
c.argument('label', help="If no label specified, set the feature flag with null label by default")
c.argument('description', help='Description of the feature flag to be set.')
with self.argument_context('appconfig feature delete') as c:
c.argument('feature', help='Key of the feature to be deleted. Support star sign as filters, for instance * means all keys and abc* means keys with abc as prefix. Comma-separated keys are not supported. Please provide an escaped string if your feature name contains a comma.')
c.argument('label', help="If no label specified, delete the feature flag with null label by default. Support star sign as filters, for instance * means all labels and abc* means labels with abc as prefix.")
with self.argument_context('appconfig feature list') as c:
c.argument('feature', help='Key of the feature to be listed. Support star sign as filters, for instance * means all keys and abc* means keys with abc as prefix. Comma-separated keys are not supported. Please provide an escaped string if your feature name contains a comma.')
c.argument('label', help="If no label specified, list all labels. Support star sign as filters, for instance * means all labels and abc* means labels with abc as prefix. Use '\\0' for null label.")
c.argument('fields', arg_type=feature_fields_arg_type)
c.argument('all_', help="List all feature flags.")
with self.argument_context('appconfig feature lock') as c:
c.argument('feature', help='Key of the feature to be locked.')
c.argument('label', help="If no label specified, lock the feature flag with null label by default.")
with self.argument_context('appconfig feature unlock') as c:
c.argument('feature', help='Key of the feature to be unlocked.')
c.argument('label', help="If no label specified, unlock the feature flag with null label by default.")
with self.argument_context('appconfig feature enable') as c:
c.argument('feature', help='Key of the feature to be enabled.')
c.argument('label', help="If no label specified, enable the feature flag with null label by default.")
with self.argument_context('appconfig feature disable') as c:
c.argument('feature', help='Key of the feature to be disabled.')
c.argument('label', help="If no label specified, disable the feature flag with null label by default.")
with self.argument_context('appconfig feature filter add') as c:
c.argument('feature', help='Name of the feature to which you want to add the filter.')
c.argument('label', help="If no label specified, add to the feature flag with null label by default.")
c.argument('filter_name', help='Name of the filter to be added.')
c.argument('filter_parameters', arg_type=filter_parameters_arg_type)
c.argument('index', type=int, help='Zero-based index in the list of filters where you want to insert the new filter. If no index is specified or index is invalid, filter will be added to the end of the list.')
with self.argument_context('appconfig feature filter delete') as c:
c.argument('feature', help='Name of the feature from which you want to delete the filter.')
c.argument('label', help="If no label specified, delete from the feature flag with null label by default.")
c.argument('filter_name', help='Name of the filter to be deleted.')
c.argument('index', type=int, help='Zero-based index of the filter to be deleted in case there are multiple instances with same filter name.')
c.argument('all_', help="Delete all filters associated with a feature flag.")
with self.argument_context('appconfig feature filter show') as c:
c.argument('feature', help='Name of the feature which contains the filter.')
c.argument('label', help="If no label specified, show the feature flag with null label by default.")
c.argument('filter_name', help='Name of the filter to be displayed.')
c.argument('index', type=int, help='Zero-based index of the filter to be displayed in case there are multiple instances with same filter name.')
with self.argument_context('appconfig feature filter list') as c:
c.argument('feature', help='Name of the feature whose filters you want to be displayed.')
c.argument('label', help="If no label specified, display filters from the feature flag with null label by default.")
c.argument('all_', help="List all filters associated with a feature flag.")
|
31,072 |
def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999,
is_nightly: bool = False):
""" Make a packs installation request.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
is_nightly (bool): Is the build nightly or not.
"""
if is_nightly:
install_nightly_packs(client, host, packs_to_install)
return
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
logging.info(f'Installing packs to server {host}')
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
logging.debug(f'Installing the following packs in server {host}:\n{packs_to_install_str}')
# make the pack installation request
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for
pack in
ast.literal_eval(response_data)]
logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
except Exception:
logging.exception('The request to install packs has failed.')
global SUCCESS_FLAG
SUCCESS_FLAG = False
|
def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999,
is_nightly: bool = False):
""" Make a packs installation request.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
is_nightly (bool): Is the build nightly or not.
"""
if is_nightly:
install_nightly_packs(client, host, packs_to_install)
return
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
logging.info(f'Installing packs on server {host}')
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
logging.debug(f'Installing the following packs in server {host}:\n{packs_to_install_str}')
# make the pack installation request
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for
pack in
ast.literal_eval(response_data)]
logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
except Exception:
logging.exception('The request to install packs has failed.')
global SUCCESS_FLAG
SUCCESS_FLAG = False
|
27,478 |
def transcribe_onprem(local_file_path, api_endpoint):
"""
Transcribe a short audio file using synchronous speech recognition on-prem
Args:
local_file_path: The path to local audio file, e.g. /path/audio.wav
api_endpoint: Endpoint to call for speech recognition, e.g. 0.0.0.0:10000
"""
from google.cloud import speech_v1p1beta1
from google.cloud.speech_v1p1beta1 import enums
import grpc
import io
# api_endpoint = '0.0.0.0:10000'
# local_file_path = 'resources/brooklyn_bridge.raw'
# Set the API endpoint to direct requests to
client_options = {"api_endpoint": api_endpoint}
# Create a gRPC channel to your server
channel = grpc.insecure_channel(target=api_endpoint)
client = speech_v1p1beta1.SpeechClient(
client_options=client_options, channel=channel
)
# The language of the supplied audio
language_code = "en-US"
# Sample rate in Hertz of the audio data sent
sample_rate_hertz = 8000
# Encoding of audio data sent. This sample sets this explicitly.
# This field is optional for FLAC and WAV audio formats.
encoding = enums.RecognitionConfig.AudioEncoding.LINEAR16
config = {
"encoding": encoding,
"language_code": language_code,
"sample_rate_hertz": sample_rate_hertz,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
# First alternative is the most probable result
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
# [END speech_transcribe_onprem]
|
def transcribe_onprem(local_file_path, api_endpoint):
"""
Transcribe a short audio file using synchronous speech recognition on-prem
Args:
local_file_path: The path to local audio file, e.g. /path/audio.wav
api_endpoint: Endpoint to call for speech recognition, e.g. 0.0.0.0:10000
"""
from google.cloud import speech_v1p1beta1
from google.cloud.speech_v1p1beta1 import enums
import grpc
import io
# api_endpoint = '0.0.0.0:10000'
# local_file_path = 'resources/brooklyn_bridge.raw'
# Set the API endpoint to direct requests to
client_options = {"api_endpoint": api_endpoint}
# Create a gRPC channel to your server
channel = grpc.insecure_channel(target=api_endpoint)
client = speech_v1p1beta1.SpeechClient(
client_options=client_options, channel=channel
)
# The language of the supplied audio
language_code = "en-US"
# Sample rate in Hertz of the audio data sent
sample_rate_hertz = 8000
# Encoding of audio data sent. This sample sets this explicitly.
# This field is optional for FLAC and WAV audio formats.
encoding = enums.RecognitionConfig.AudioEncoding.LINEAR16
config = {
"encoding": encoding,
"language_code": language_code,
"sample_rate_hertz": sample_rate_hertz,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
# First alternative is the most probable result
alternative = result.alternatives[0]
print(f"Transcript: {alternative.transcript}")
# [END speech_transcribe_onprem]
|
31,719 |
def parse_list(parse_all, header, value, list_name, list_separator):
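# Fetch the named list via the 'getList' command, split it into headers and
# rows using list_separator, and build command results from the rows whose
# `header` column matches `value` (or from all rows when parse_all is 'true').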
validate_args(parse_all, header, value)
list_data = demisto.executeCommand("getList", {'listName': list_name})[0]['Contents']
context = {
"ListName": list_name,
"Header": header,
"Value": value,
"ParseAll": parse_all
}
validate_list_exists(list_data)
headers, lines = list_to_headers_and_lines(list_data, list_separator)
if parse_all.lower() == 'true':
command_results = parse_relevant_rows(headers, lines, header, value, context, parse_all=True)
else:
validate_header_exists(headers, header)
command_results = parse_relevant_rows(headers, lines, header, value, context)
return command_results
|
def parse_list(parse_all, header, value, list_name, list_separator: str):
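# Fetch the named list via the 'getList' command, split it into headers and
# rows using list_separator, and build command results from the rows whose
# `header` column matches `value` (or from all rows when parse_all is 'true').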
validate_args(parse_all, header, value)
list_data = demisto.executeCommand("getList", {'listName': list_name})[0]['Contents']
context = {
"ListName": list_name,
"Header": header,
"Value": value,
"ParseAll": parse_all
}
validate_list_exists(list_data)
headers, lines = list_to_headers_and_lines(list_data, list_separator)
if parse_all.lower() == 'true':
command_results = parse_relevant_rows(headers, lines, header, value, context, parse_all=True)
else:
validate_header_exists(headers, header)
command_results = parse_relevant_rows(headers, lines, header, value, context)
return command_results
|
26,403 |
def main(args: Union[List[str], None] = None, stdout: TextIO = sys.stdout) -> int:
"""Run the main loop for toil-cwl-runner."""
# Remove cwltool logger's stream handler so it uses Toil's
cwllogger.removeHandler(defaultStreamHandler)
if args is None:
args = sys.argv[1:]
config = Config()
config.disableChaining = True
config.cwl = True
parser = argparse.ArgumentParser()
addOptions(parser, config)
parser.add_argument("cwltool", type=str)
parser.add_argument("cwljob", nargs=argparse.REMAINDER)
# Will override the "jobStore" positional argument, enables
# user to select jobStore or get a default from logic one below.
parser.add_argument("--jobStore", "--jobstore", dest="jobStore", type=str)
parser.add_argument("--not-strict", action="store_true")
parser.add_argument(
"--enable-dev",
action="store_true",
help="Enable loading and running development versions of CWL",
)
parser.add_argument("--quiet", dest="logLevel", action="store_const", const="ERROR")
parser.add_argument("--basedir", type=str) # TODO: Might be hard-coded?
parser.add_argument("--outdir", type=str, default=os.getcwd())
parser.add_argument("--version", action="version", version=baseVersion)
dockergroup = parser.add_mutually_exclusive_group()
dockergroup.add_argument(
"--user-space-docker-cmd",
help="(Linux/OS X only) Specify a user space docker command (like "
"udocker or dx-docker) that will be used to call 'pull' and 'run'",
)
dockergroup.add_argument(
"--singularity",
action="store_true",
default=False,
help="[experimental] Use Singularity runtime for running containers. "
"Requires Singularity v2.6.1+ and Linux with kernel version v3.18+ or "
"with overlayfs support backported.",
)
dockergroup.add_argument(
"--no-container",
action="store_true",
help="Do not execute jobs in a "
"Docker container, even when `DockerRequirement` "
"is specified under `hints`.",
)
dockergroup.add_argument(
"--leave-container",
action="store_false",
default=True,
help="Do not delete Docker container used by jobs after they exit",
dest="rm_container",
)
parser.add_argument(
"--preserve-environment",
type=str,
nargs="+",
help="Preserve specified environment variables when running"
" CommandLineTools",
metavar=("VAR1 VAR2"),
default=("PATH",),
dest="preserve_environment",
)
parser.add_argument(
"--preserve-entire-environment",
action="store_true",
help="Preserve all environment variable when running " "CommandLineTools.",
default=False,
dest="preserve_entire_environment",
)
parser.add_argument(
"--destBucket",
type=str,
help="Specify a cloud bucket endpoint for output files.",
)
parser.add_argument("--beta-dependency-resolvers-configuration", default=None)
parser.add_argument("--beta-dependencies-directory", default=None)
parser.add_argument("--beta-use-biocontainers", default=None, action="store_true")
parser.add_argument("--beta-conda-dependencies", default=None, action="store_true")
parser.add_argument(
"--tmpdir-prefix",
type=Text,
help="Path prefix for temporary directories",
default="tmp",
)
parser.add_argument(
"--tmp-outdir-prefix",
type=Text,
help="Path prefix for intermediate output directories",
default="tmp",
)
parser.add_argument(
"--force-docker-pull",
action="store_true",
default=False,
dest="force_docker_pull",
help="Pull latest docker image even if it is locally present",
)
parser.add_argument(
"--no-match-user",
action="store_true",
default=False,
help="Disable passing the current uid to `docker run --user`",
)
parser.add_argument(
"--no-read-only",
action="store_true",
default=False,
help="Do not set root directory in the container as read-only",
)
parser.add_argument(
"--strict-memory-limit",
action="store_true",
help="When running with "
"software containers and the Docker engine, pass either the "
"calculated memory allocation from ResourceRequirements or the "
"default of 1 gigabyte to Docker's --memory option.",
)
parser.add_argument(
"--relax-path-checks",
action="store_true",
default=False,
help="Relax requirements on path names to permit "
"spaces and hash characters.",
dest="relax_path_checks",
)
parser.add_argument(
"--default-container",
help="Specify a default docker container that will be "
"used if the workflow fails to specify one.",
)
provgroup = parser.add_argument_group(
"Options for recording provenance " "information of the execution"
)
provgroup.add_argument(
"--provenance",
help="Save provenance to specified folder as a "
"Research Object that captures and aggregates "
"workflow execution and data products.",
type=Text,
)
provgroup.add_argument(
"--enable-user-provenance",
default=False,
action="store_true",
help="Record user account info as part of provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--disable-user-provenance",
default=False,
action="store_false",
help="Do not record user account info in provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--enable-host-provenance",
default=False,
action="store_true",
help="Record host info as part of provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--disable-host-provenance",
default=False,
action="store_false",
help="Do not record host info in provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--orcid",
help="Record user ORCID identifier as part of "
"provenance, e.g. https://orcid.org/0000-0002-1825-0097 "
"or 0000-0002-1825-0097. Alternatively the environment variable "
"ORCID may be set.",
dest="orcid",
default=os.environ.get("ORCID", ""),
type=Text,
)
provgroup.add_argument(
"--full-name",
help="Record full name of user as part of provenance, "
"e.g. Josiah Carberry. You may need to use shell quotes to preserve "
"spaces. Alternatively the environment variable CWL_FULL_NAME may "
"be set.",
dest="cwl_full_name",
default=os.environ.get("CWL_FULL_NAME", ""),
type=Text,
)
# Problem: we want to keep our job store somewhere auto-generated based on
# our options, unless overridden by... an option. So we will need to parse
# options twice, because we need to feed the parser the job store.
# Propose a local workdir, probably under /tmp.
# mkdtemp actually creates the directory, but
# toil requires that the directory not exist,
# since it is going to be our jobstore,
# so make it and delete it and allow
# toil to create it again (!)
workdir = tempfile.mkdtemp()
os.rmdir(workdir)
# we use the workdir as the default jobStore:
options = parser.parse_args([workdir] + args)
# if tmpdir_prefix is not the default value, set workDir if unset, and move
# workdir and the job store under it
if options.tmpdir_prefix != "tmp":
workdir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
os.rmdir(workdir)
# Re-parse arguments with the new default jobstore under the temp dir.
# It still might be overridden by a --jobStore option
options = parser.parse_args([workdir] + args)
if options.workDir is None:
# We need to override workDir because by default Toil will pick
# somewhere under the system temp directory if unset, ignoring
# --tmpdir-prefix.
#
# If set, workDir needs to exist, so we directly use the prefix
options.workDir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
if options.provisioner and not options.jobStore:
raise NoSuchJobStoreException(
"Please specify a jobstore with the --jobStore option when "
"specifying a provisioner."
)
if options.batchSystem == "kubernetes":
options.singularity = True
use_container = not options.no_container
if options.logLevel:
# Make sure cwltool uses Toil's log level.
# Applies only on the leader.
cwllogger.setLevel(options.logLevel.upper())
outdir = os.path.abspath(options.outdir)
tmp_outdir_prefix = os.path.abspath(options.tmp_outdir_prefix)
fileindex = dict() # type: ignore
existing = dict() # type: ignore
conf_file = getattr(options, "beta_dependency_resolvers_configuration", None)
use_conda_dependencies = getattr(options, "beta_conda_dependencies", None)
job_script_provider = None
if conf_file or use_conda_dependencies:
dependencies_configuration = DependenciesConfiguration(options)
job_script_provider = dependencies_configuration
options.default_container = None
runtime_context = cwltool.context.RuntimeContext(vars(options))
runtime_context.find_default_container = functools.partial(
find_default_container, options
)
runtime_context.workdir = workdir # type: ignore
runtime_context.move_outputs = "leave"
runtime_context.rm_tmpdir = False
loading_context = cwltool.context.LoadingContext(vars(options))
if options.provenance:
research_obj = cwltool.provenance.ResearchObject(
temp_prefix_ro=options.tmp_outdir_prefix,
orcid=options.orcid,
full_name=options.cwl_full_name,
fsaccess=runtime_context.make_fs_access(""),
)
runtime_context.research_obj = research_obj
with Toil(options) as toil:
if options.restart:
outobj = toil.restart()
else:
loading_context.hints = [
{
"class": "ResourceRequirement",
"coresMin": toil.config.defaultCores,
"ramMin": toil.config.defaultMemory / (2 ** 20),
"outdirMin": toil.config.defaultDisk / (2 ** 20),
"tmpdirMin": 0,
}
]
loading_context.construct_tool_object = toil_make_tool
loading_context.resolver = cwltool.resolver.tool_resolver
loading_context.strict = not options.not_strict
options.workflow = options.cwltool
options.job_order = options.cwljob
try:
uri, tool_file_uri = cwltool.load_tool.resolve_tool_uri(
options.cwltool,
loading_context.resolver,
loading_context.fetcher_constructor,
)
except schema_salad.exceptions.ValidationException:
print('You may be getting this error because your arguments are incorrect or out of order.' +
usage_message, file=sys.stderr)
raise
options.tool_help = None
options.debug = options.logLevel == "DEBUG"
job_order_object, options.basedir, jobloader = cwltool.main.load_job_order(
options,
sys.stdin,
loading_context.fetcher_constructor,
loading_context.overrides_list,
tool_file_uri,
)
loading_context, workflowobj, uri = cwltool.load_tool.fetch_document(
uri, loading_context
)
loading_context, uri = cwltool.load_tool.resolve_and_validate_document(
loading_context, workflowobj, uri
)
loading_context.overrides_list.extend(
cast(
List[CWLObjectType],
loading_context.metadata.get("cwltool:overrides", []),
)
)
document_loader = loading_context.loader
metadata = loading_context.metadata
processobj = document_loader.idx
if options.provenance and runtime_context.research_obj:
runtime_context.research_obj.packed_workflow(
cwltool.main.print_pack(loading_context, uri)
)
try:
tool = cwltool.load_tool.make_tool(uri, loading_context)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
runtime_context.secret_store = SecretStore()
try:
initialized_job_order = cwltool.main.init_job_order(
job_order_object,
options,
tool,
jobloader,
sys.stdout,
secret_store=runtime_context.secret_store,
)
except SystemExit as e:
if e.code == 2: # raised by argparse's parse_args() function
print('If both a cwl file and a yml file were provided, this may be the argument order.' +
usage_message, file=sys.stderr)
raise
fs_access = cwltool.stdfsaccess.StdFsAccess(options.basedir)
fill_in_defaults(tool.tool["inputs"], initialized_job_order, fs_access)
for inp in tool.tool["inputs"]:
def set_secondary(fileobj):
if isinstance(fileobj, Mapping) and fileobj.get("class") == "File":
if "secondaryFiles" not in fileobj:
# inits all secondary files with 'file://' schema
# later changed to 'toilfs:' when imported into the jobstore
fileobj["secondaryFiles"] = [
{
"location": cwltool.builder.substitute(
fileobj["location"], sf["pattern"]
),
"class": "File",
}
for sf in inp["secondaryFiles"]
]
if isinstance(fileobj, MutableSequence):
for entry in fileobj:
set_secondary(entry)
if shortname(inp["id"]) in initialized_job_order and inp.get(
"secondaryFiles"
):
set_secondary(initialized_job_order[shortname(inp["id"])])
runtime_context.use_container = use_container
runtime_context.tmp_outdir_prefix = os.path.realpath(tmp_outdir_prefix)
runtime_context.job_script_provider = job_script_provider
runtime_context.force_docker_pull = options.force_docker_pull
runtime_context.no_match_user = options.no_match_user
runtime_context.no_read_only = options.no_read_only
runtime_context.basedir = options.basedir
runtime_context.move_outputs = "move"
# We instantiate an early builder object here to populate indirect
# secondaryFile references using cwltool's library because we need
# to resolve them before toil imports them into the filestore.
# A second builder will be built in the job's run method when toil
# actually starts the cwl job.
builder = tool._init_job(initialized_job_order, runtime_context)
# make sure this doesn't add listing items; if shallow_listing is
# selected, it will discover dirs one deep and then again later on
# (producing 2+ deep listings instead of only 1)
builder.loadListing = "no_listing"
builder.bind_input(
tool.inputs_record_schema,
initialized_job_order,
discover_secondaryFiles=True,
)
def path_to_loc(obj):
if "location" not in obj and "path" in obj:
obj["location"] = obj["path"]
del obj["path"]
def import_files(inner_tool):
visit_class(inner_tool, ("File", "Directory"), path_to_loc)
visit_class(
inner_tool, ("File",), functools.partial(add_sizes, fs_access)
)
normalizeFilesDirs(inner_tool)
adjustFileObjs(
inner_tool,
functools.partial(
uploadFile,
toil.importFile,
fileindex,
existing,
skip_broken=True,
),
)
# files with the 'file://' uri are imported into the jobstore and
# changed to 'toilfs:'
import_files(initialized_job_order)
visitSteps(tool, import_files)
for job_name, job_params in initialized_job_order.items():
rm_unprocessed_secondary_files(job_params)
try:
wf1, _ = makeJob(
tool=tool,
jobobj={},
runtime_context=runtime_context,
conditional=None,
)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
wf1.cwljob = initialized_job_order
outobj = toil.start(wf1)
outobj = resolve_dict_w_promises(outobj)
# Stage files. Specify destination bucket if specified in CLI
# options. If destination bucket not passed in,
# options.destBucket's value will be None.
toilStageFiles(toil, outobj, outdir, destBucket=options.destBucket)
if runtime_context.research_obj is not None:
runtime_context.research_obj.create_job(outobj, True)
def remove_at_id(doc):
if isinstance(doc, MutableMapping):
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
if isinstance(value, MutableSequence):
for entry in value:
if isinstance(value, MutableMapping):
remove_at_id(entry)
remove_at_id(outobj)
visit_class(
outobj,
("File",),
functools.partial(add_sizes, runtime_context.make_fs_access("")),
)
prov_dependencies = cwltool.main.prov_deps(
workflowobj, document_loader, uri
)
runtime_context.research_obj.generate_snapshot(prov_dependencies)
runtime_context.research_obj.close(options.provenance)
if not options.destBucket:
visit_class(
outobj,
("File",),
functools.partial(
compute_checksums, cwltool.stdfsaccess.StdFsAccess("")
),
)
visit_class(outobj, ("File",), MutationManager().unset_generation)
stdout.write(json.dumps(outobj, indent=4))
return 0
|
def main(args: Union[List[str], None] = None, stdout: TextIO = sys.stdout) -> int:
"""Run the main loop for toil-cwl-runner."""
# Remove cwltool logger's stream handler so it uses Toil's
cwllogger.removeHandler(defaultStreamHandler)
if args is None:
args = sys.argv[1:]
config = Config()
config.disableChaining = True
config.cwl = True
parser = argparse.ArgumentParser()
addOptions(parser, config)
parser.add_argument("cwltool", type=str)
parser.add_argument("cwljob", nargs=argparse.REMAINDER)
# Will override the "jobStore" positional argument, enables
# user to select jobStore or get a default from logic one below.
parser.add_argument("--jobStore", "--jobstore", dest="jobStore", type=str)
parser.add_argument("--not-strict", action="store_true")
parser.add_argument(
"--enable-dev",
action="store_true",
help="Enable loading and running development versions of CWL",
)
parser.add_argument("--quiet", dest="logLevel", action="store_const", const="ERROR")
parser.add_argument("--basedir", type=str) # TODO: Might be hard-coded?
parser.add_argument("--outdir", type=str, default=os.getcwd())
parser.add_argument("--version", action="version", version=baseVersion)
dockergroup = parser.add_mutually_exclusive_group()
dockergroup.add_argument(
"--user-space-docker-cmd",
help="(Linux/OS X only) Specify a user space docker command (like "
"udocker or dx-docker) that will be used to call 'pull' and 'run'",
)
dockergroup.add_argument(
"--singularity",
action="store_true",
default=False,
help="[experimental] Use Singularity runtime for running containers. "
"Requires Singularity v2.6.1+ and Linux with kernel version v3.18+ or "
"with overlayfs support backported.",
)
dockergroup.add_argument(
"--no-container",
action="store_true",
help="Do not execute jobs in a "
"Docker container, even when `DockerRequirement` "
"is specified under `hints`.",
)
dockergroup.add_argument(
"--leave-container",
action="store_false",
default=True,
help="Do not delete Docker container used by jobs after they exit",
dest="rm_container",
)
parser.add_argument(
"--preserve-environment",
type=str,
nargs="+",
help="Preserve specified environment variables when running"
" CommandLineTools",
metavar=("VAR1 VAR2"),
default=("PATH",),
dest="preserve_environment",
)
parser.add_argument(
"--preserve-entire-environment",
action="store_true",
help="Preserve all environment variable when running " "CommandLineTools.",
default=False,
dest="preserve_entire_environment",
)
parser.add_argument(
"--destBucket",
type=str,
help="Specify a cloud bucket endpoint for output files.",
)
parser.add_argument("--beta-dependency-resolvers-configuration", default=None)
parser.add_argument("--beta-dependencies-directory", default=None)
parser.add_argument("--beta-use-biocontainers", default=None, action="store_true")
parser.add_argument("--beta-conda-dependencies", default=None, action="store_true")
parser.add_argument(
"--tmpdir-prefix",
type=Text,
help="Path prefix for temporary directories",
default="tmp",
)
parser.add_argument(
"--tmp-outdir-prefix",
type=Text,
help="Path prefix for intermediate output directories",
default="tmp",
)
parser.add_argument(
"--force-docker-pull",
action="store_true",
default=False,
dest="force_docker_pull",
help="Pull latest docker image even if it is locally present",
)
parser.add_argument(
"--no-match-user",
action="store_true",
default=False,
help="Disable passing the current uid to `docker run --user`",
)
parser.add_argument(
"--no-read-only",
action="store_true",
default=False,
help="Do not set root directory in the container as read-only",
)
parser.add_argument(
"--strict-memory-limit",
action="store_true",
help="When running with "
"software containers and the Docker engine, pass either the "
"calculated memory allocation from ResourceRequirements or the "
"default of 1 gigabyte to Docker's --memory option.",
)
parser.add_argument(
"--relax-path-checks",
action="store_true",
default=False,
help="Relax requirements on path names to permit "
"spaces and hash characters.",
dest="relax_path_checks",
)
parser.add_argument(
"--default-container",
help="Specify a default docker container that will be "
"used if the workflow fails to specify one.",
)
provgroup = parser.add_argument_group(
"Options for recording provenance " "information of the execution"
)
provgroup.add_argument(
"--provenance",
help="Save provenance to specified folder as a "
"Research Object that captures and aggregates "
"workflow execution and data products.",
type=Text,
)
provgroup.add_argument(
"--enable-user-provenance",
default=False,
action="store_true",
help="Record user account info as part of provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--disable-user-provenance",
default=False,
action="store_false",
help="Do not record user account info in provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--enable-host-provenance",
default=False,
action="store_true",
help="Record host info as part of provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--disable-host-provenance",
default=False,
action="store_false",
help="Do not record host info in provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--orcid",
help="Record user ORCID identifier as part of "
"provenance, e.g. https://orcid.org/0000-0002-1825-0097 "
"or 0000-0002-1825-0097. Alternatively the environment variable "
"ORCID may be set.",
dest="orcid",
default=os.environ.get("ORCID", ""),
type=Text,
)
provgroup.add_argument(
"--full-name",
help="Record full name of user as part of provenance, "
"e.g. Josiah Carberry. You may need to use shell quotes to preserve "
"spaces. Alternatively the environment variable CWL_FULL_NAME may "
"be set.",
dest="cwl_full_name",
default=os.environ.get("CWL_FULL_NAME", ""),
type=Text,
)
# Problem: we want to keep our job store somewhere auto-generated based on
# our options, unless overridden by... an option. So we will need to parse
# options twice, because we need to feed the parser the job store.
# Propose a local workdir, probably under /tmp.
# mkdtemp actually creates the directory, but
# toil requires that the directory not exist,
# since it is going to be our jobstore,
# so make it and delete it and allow
# toil to create it again (!)
workdir = tempfile.mkdtemp()
os.rmdir(workdir)
# we use the workdir as the default jobStore:
options = parser.parse_args([workdir] + args)
# if tmpdir_prefix is not the default value, set workDir if unset, and move
# workdir and the job store under it
if options.tmpdir_prefix != "tmp":
workdir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
os.rmdir(workdir)
# Re-parse arguments with the new default jobstore under the temp dir.
# It still might be overridden by a --jobStore option
options = parser.parse_args([workdir] + args)
if options.workDir is None:
# We need to override workDir because by default Toil will pick
# somewhere under the system temp directory if unset, ignoring
# --tmpdir-prefix.
#
# If set, workDir needs to exist, so we directly use the prefix
options.workDir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
if options.provisioner and not options.jobStore:
raise NoSuchJobStoreException(
"Please specify a jobstore with the --jobStore option when "
"specifying a provisioner."
)
if options.batchSystem == "kubernetes":
options.singularity = True
use_container = not options.no_container
if options.logLevel:
# Make sure cwltool uses Toil's log level.
# Applies only on the leader.
cwllogger.setLevel(options.logLevel.upper())
outdir = os.path.abspath(options.outdir)
tmp_outdir_prefix = os.path.abspath(options.tmp_outdir_prefix)
fileindex = dict() # type: ignore
existing = dict() # type: ignore
conf_file = getattr(options, "beta_dependency_resolvers_configuration", None)
use_conda_dependencies = getattr(options, "beta_conda_dependencies", None)
job_script_provider = None
if conf_file or use_conda_dependencies:
dependencies_configuration = DependenciesConfiguration(options)
job_script_provider = dependencies_configuration
options.default_container = None
runtime_context = cwltool.context.RuntimeContext(vars(options))
runtime_context.find_default_container = functools.partial(
find_default_container, options
)
runtime_context.workdir = workdir # type: ignore
runtime_context.move_outputs = "leave"
runtime_context.rm_tmpdir = False
loading_context = cwltool.context.LoadingContext(vars(options))
if options.provenance:
research_obj = cwltool.provenance.ResearchObject(
temp_prefix_ro=options.tmp_outdir_prefix,
orcid=options.orcid,
full_name=options.cwl_full_name,
fsaccess=runtime_context.make_fs_access(""),
)
runtime_context.research_obj = research_obj
with Toil(options) as toil:
if options.restart:
outobj = toil.restart()
else:
loading_context.hints = [
{
"class": "ResourceRequirement",
"coresMin": toil.config.defaultCores,
"ramMin": toil.config.defaultMemory / (2 ** 20),
"outdirMin": toil.config.defaultDisk / (2 ** 20),
"tmpdirMin": 0,
}
]
loading_context.construct_tool_object = toil_make_tool
loading_context.resolver = cwltool.resolver.tool_resolver
loading_context.strict = not options.not_strict
options.workflow = options.cwltool
options.job_order = options.cwljob
try:
uri, tool_file_uri = cwltool.load_tool.resolve_tool_uri(
options.cwltool,
loading_context.resolver,
loading_context.fetcher_constructor,
)
except schema_salad.exceptions.ValidationException:
print('You may be getting this error because your arguments are incorrect or out of order.' +
usage_message, file=sys.stderr)
raise
options.tool_help = None
options.debug = options.logLevel == "DEBUG"
job_order_object, options.basedir, jobloader = cwltool.main.load_job_order(
options,
sys.stdin,
loading_context.fetcher_constructor,
loading_context.overrides_list,
tool_file_uri,
)
loading_context, workflowobj, uri = cwltool.load_tool.fetch_document(
uri, loading_context
)
loading_context, uri = cwltool.load_tool.resolve_and_validate_document(
loading_context, workflowobj, uri
)
loading_context.overrides_list.extend(
cast(
List[CWLObjectType],
loading_context.metadata.get("cwltool:overrides", []),
)
)
document_loader = loading_context.loader
metadata = loading_context.metadata
processobj = document_loader.idx
if options.provenance and runtime_context.research_obj:
runtime_context.research_obj.packed_workflow(
cwltool.main.print_pack(loading_context, uri)
)
try:
tool = cwltool.load_tool.make_tool(uri, loading_context)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
runtime_context.secret_store = SecretStore()
try:
initialized_job_order = cwltool.main.init_job_order(
job_order_object,
options,
tool,
jobloader,
sys.stdout,
secret_store=runtime_context.secret_store,
)
except SystemExit as e:
if e.code == 2: # raised by argparse's parse_args() function
print('If both a CWL file and an input object (YAML/JSON) file were provided, this may be the argument order.' +
usage_message, file=sys.stderr)
raise
fs_access = cwltool.stdfsaccess.StdFsAccess(options.basedir)
fill_in_defaults(tool.tool["inputs"], initialized_job_order, fs_access)
for inp in tool.tool["inputs"]:
def set_secondary(fileobj):
if isinstance(fileobj, Mapping) and fileobj.get("class") == "File":
if "secondaryFiles" not in fileobj:
# inits all secondary files with 'file://' schema
# later changed to 'toilfs:' when imported into the jobstore
fileobj["secondaryFiles"] = [
{
"location": cwltool.builder.substitute(
fileobj["location"], sf["pattern"]
),
"class": "File",
}
for sf in inp["secondaryFiles"]
]
if isinstance(fileobj, MutableSequence):
for entry in fileobj:
set_secondary(entry)
if shortname(inp["id"]) in initialized_job_order and inp.get(
"secondaryFiles"
):
set_secondary(initialized_job_order[shortname(inp["id"])])
runtime_context.use_container = use_container
runtime_context.tmp_outdir_prefix = os.path.realpath(tmp_outdir_prefix)
runtime_context.job_script_provider = job_script_provider
runtime_context.force_docker_pull = options.force_docker_pull
runtime_context.no_match_user = options.no_match_user
runtime_context.no_read_only = options.no_read_only
runtime_context.basedir = options.basedir
runtime_context.move_outputs = "move"
# We instantiate an early builder object here to populate indirect
# secondaryFile references using cwltool's library because we need
# to resolve them before toil imports them into the filestore.
# A second builder will be built in the job's run method when toil
# actually starts the cwl job.
builder = tool._init_job(initialized_job_order, runtime_context)
# make sure this doesn't add listing items; if shallow_listing is
# selected, it will discover dirs one deep and then again later on
# (producing 2+ deep listings instead of only 1)
builder.loadListing = "no_listing"
builder.bind_input(
tool.inputs_record_schema,
initialized_job_order,
discover_secondaryFiles=True,
)
def path_to_loc(obj):
if "location" not in obj and "path" in obj:
obj["location"] = obj["path"]
del obj["path"]
def import_files(inner_tool):
visit_class(inner_tool, ("File", "Directory"), path_to_loc)
visit_class(
inner_tool, ("File",), functools.partial(add_sizes, fs_access)
)
normalizeFilesDirs(inner_tool)
adjustFileObjs(
inner_tool,
functools.partial(
uploadFile,
toil.importFile,
fileindex,
existing,
skip_broken=True,
),
)
# files with the 'file://' uri are imported into the jobstore and
# changed to 'toilfs:'
import_files(initialized_job_order)
visitSteps(tool, import_files)
for job_name, job_params in initialized_job_order.items():
rm_unprocessed_secondary_files(job_params)
try:
wf1, _ = makeJob(
tool=tool,
jobobj={},
runtime_context=runtime_context,
conditional=None,
)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
wf1.cwljob = initialized_job_order
outobj = toil.start(wf1)
outobj = resolve_dict_w_promises(outobj)
# Stage files. Specify destination bucket if specified in CLI
# options. If destination bucket not passed in,
# options.destBucket's value will be None.
toilStageFiles(toil, outobj, outdir, destBucket=options.destBucket)
if runtime_context.research_obj is not None:
runtime_context.research_obj.create_job(outobj, True)
def remove_at_id(doc):
if isinstance(doc, MutableMapping):
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
if isinstance(value, MutableSequence):
for entry in value:
if isinstance(value, MutableMapping):
remove_at_id(entry)
remove_at_id(outobj)
visit_class(
outobj,
("File",),
functools.partial(add_sizes, runtime_context.make_fs_access("")),
)
prov_dependencies = cwltool.main.prov_deps(
workflowobj, document_loader, uri
)
runtime_context.research_obj.generate_snapshot(prov_dependencies)
runtime_context.research_obj.close(options.provenance)
if not options.destBucket:
visit_class(
outobj,
("File",),
functools.partial(
compute_checksums, cwltool.stdfsaccess.StdFsAccess("")
),
)
visit_class(outobj, ("File",), MutationManager().unset_generation)
stdout.write(json.dumps(outobj, indent=4))
return 0
|
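The job-store setup near the top of the runner above follows a small but easy-to-miss pattern: reserve a unique temporary path with mkdtemp, delete it so the job store can later be created fresh at that path, and parse the command line twice so --tmpdir-prefix (and any explicit job-store argument) can still override the generated default. Below is a minimal sketch of that pattern, assuming a generic argparse parser with a positional job-store argument and a --tmpdir-prefix option defaulting to "tmp"; it is an illustration only, not Toil's actual CLI wiring.

import argparse
import os
import tempfile


def parse_with_default_jobstore(parser: argparse.ArgumentParser, args):
    # Reserve a unique path, then delete the directory: the job store must not
    # pre-exist, so only its name is reused as the default positional argument.
    workdir = tempfile.mkdtemp()
    os.rmdir(workdir)
    options = parser.parse_args([workdir] + list(args))
    # If the user chose a non-default temp prefix, rebuild the default job store
    # under that prefix (a simplified stand-in for cwltool.utils.create_tmp_dir)
    # and parse again; an explicit job-store argument still wins.
    if getattr(options, "tmpdir_prefix", "tmp") != "tmp":
        workdir = tempfile.mkdtemp(prefix=options.tmpdir_prefix)
        os.rmdir(workdir)
        options = parser.parse_args([workdir] + list(args))
    return options, workdir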
12,603 |
def construct_coverage_config(
source_roots: SourceRoots, python_files: List[str], test_time: Optional[bool] = False,
) -> str:
# A map from source root stripped source to its source root. eg:
# {'pants/testutil/subsystem/util.py': 'src/python'}
# This is so coverage reports referencing /chroot/path/pants/testutil/subsystem/util.py can be mapped
# back to the actual sources they reference when merging coverage reports.
init_files = list(identify_missing_init_files(list(python_files)))
def source_root_stripped_source_and_source_root(file_name):
source_root = source_roots.find_by_path(file_name)
source_root_stripped_path = file_name[len(source_root.path) + 1 :]
return (source_root_stripped_path, source_root.path)
source_to_target_base = dict(
source_root_stripped_source_and_source_root(filename)
for filename in sorted(python_files) + init_files
)
config_parser = configparser.ConfigParser()
config_parser.read_file(StringIO(DEFAULT_COVERAGE_CONFIG))
ensure_section(config_parser, "run")
config_parser.set("run", "plugins", COVERAGE_PLUGIN_MODULE_NAME)
config_parser.add_section(COVERAGE_PLUGIN_MODULE_NAME)
config_parser.set(
COVERAGE_PLUGIN_MODULE_NAME, "source_to_target_base", json.dumps(source_to_target_base)
)
config_parser.set(COVERAGE_PLUGIN_MODULE_NAME, "test_time", json.dumps(test_time))
config = StringIO()
config_parser.write(config)
return config.getvalue()
|
def construct_coverage_config(
source_roots: SourceRoots, python_files: List[str], test_time: Optional[bool] = False,
) -> str:
# A map from source root stripped source to its source root. eg:
# {'pants/testutil/subsystem/util.py': 'src/python'}
# This is so coverage reports referencing /chroot/path/pants/testutil/subsystem/util.py can be mapped
# back to the actual sources they reference when merging coverage reports.
init_files = list(identify_missing_init_files(list(python_files)))
def source_root_stripped_source_and_source_root(file_name):
source_root = source_roots.find_by_path(file_name)
source_root_stripped_path = file_name[len(source_root.path) + 1 :]
return (source_root_stripped_path, source_root.path)
source_to_target_base = dict(
source_root_stripped_source_and_source_root(filename)
for filename in sorted(python_files, *init_files)
)
config_parser = configparser.ConfigParser()
config_parser.read_file(StringIO(DEFAULT_COVERAGE_CONFIG))
ensure_section(config_parser, "run")
config_parser.set("run", "plugins", COVERAGE_PLUGIN_MODULE_NAME)
config_parser.add_section(COVERAGE_PLUGIN_MODULE_NAME)
config_parser.set(
COVERAGE_PLUGIN_MODULE_NAME, "source_to_target_base", json.dumps(source_to_target_base)
)
config_parser.set(COVERAGE_PLUGIN_MODULE_NAME, "test_time", json.dumps(test_time))
config = StringIO()
config_parser.write(config)
return config.getvalue()
|
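The source_to_target_base map built above (e.g. {'pants/testutil/subsystem/util.py': 'src/python'}) exists so that coverage data recorded against chroot paths can be translated back to repository paths when reports are merged. The following is a toy illustration of that translation, not Pants' actual coverage plugin; the chroot prefix and helper name are assumptions for the example.

import os

def restore_repo_path(measured_path, chroot, source_to_target_base):
    # Strip the chroot prefix, then re-attach the recorded source root.
    rel = os.path.relpath(measured_path, chroot)
    base = source_to_target_base.get(rel)
    return os.path.join(base, rel) if base else measured_path

mapping = {"pants/testutil/subsystem/util.py": "src/python"}
print(restore_repo_path("/chroot/path/pants/testutil/subsystem/util.py", "/chroot/path", mapping))
# -> src/python/pants/testutil/subsystem/util.py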
42,898 |
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
``external_phase``, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
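To make the output format concrete: each tlist entry [n, m, internal_phase, external_phase, n_size] expands into phase shifters and symmetric beamsplitters, and diags supplies one final phase per mode. The sketch below replays a decomposition through caller-supplied callbacks; it mirrors the circuit described in the docstring, but the callback interface is an assumption for illustration, not Strawberry Fields' own API.

import numpy as np

def replay(tlist, diags, apply_rgate, apply_bsgate):
    # apply_rgate(phase, mode) and apply_bsgate(theta, phi, mode1, mode2)
    # are caller-supplied; they could print gates or build a program object.
    for n, m, internal_phase, external_phase, _ in tlist:
        apply_rgate(external_phase, int(n))                 # phase before the first 50:50 BS
        apply_bsgate(np.pi / 4, np.pi / 2, int(n), int(m))  # symmetric beamsplitter
        apply_rgate(internal_phase, int(n))                 # phase between the beamsplitters
        apply_bsgate(np.pi / 4, np.pi / 2, int(n), int(m))  # second symmetric beamsplitter
    for mode, d in enumerate(diags):
        apply_rgate(np.angle(d), mode)                      # final diagonal phases

# For example, printing the gate sequence:
# replay(tlist, diags,
#        lambda phi, k: print(f"Rgate({phi:.3f}) | {k}"),
#        lambda t, p, a, b: print(f"BSgate({t:.3f}, {p:.3f}) | {a}, {b}"))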
13,980 |
def rauc(signal, baseline=None, bin_duration=None, t_start=None, t_stop=None):
'''
Calculate the rectified area under the curve (RAUC) for an AnalogSignal.
The signal is optionally divided into bins with duration `bin_duration`,
and the rectified signal (absolute value) is integrated within each bin to
find the area under the curve. The mean or median of the signal or an
arbitrary baseline may optionally be subtracted before rectification. If
the number of bins is 1 (default), a single value is returned for each
channel in the input signal. Otherwise, an AnalogSignal containing the
values for each bin is returned along with the times of the centers of the
bins.
Parameters
----------
signal : neo.AnalogSignal
The signal to integrate. If `signal` contains more than one channel,
each is integrated separately.
bin_duation : quantities.Quantity
The length of time that each integration should span. If None, there
will be only one bin spanning the entire signal duration. If
`bin_duration` does not divide evenly into the signal duration, the end
        of the signal is padded with zeros to accommodate the final,
overextending bin.
Default: None
baseline : string or quantities.Quantity
A factor to subtract from the signal before rectification. If `'mean'`
or `'median'`, the mean or median value of the entire signal is
subtracted on a channel-by-channel basis.
Default: None
t_start, t_stop : quantities.Quantity
Times to start and end the algorithm. The signal is cropped using
`signal.time_slice(t_start, t_stop)` after baseline removal. Useful if
you want the RAUC for a short section of the signal but want the
mean or median calculation (`baseline='mean'` or `baseline='median'`)
to use the entire signal for better baseline estimation.
Default: None
Returns
-------
quantities.Quantity or neo.AnalogSignal
If the number of bins is 1, the returned object is a scalar or
vector Quantity containing a single RAUC value for each channel.
Otherwise, the returned object is an AnalogSignal containing the
RAUC(s) for each bin stored as a sample, with times corresponding to
the center of each bin. The output signal will have the same number
of channels as the input signal.
Raises
------
TypeError
If the input signal is not a neo.AnalogSignal.
TypeError
        If `bin_duration` is neither None nor a Quantity.
TypeError
If `baseline` is not None, `'mean'`, `'median'`, or a Quantity.
'''
if not isinstance(signal, neo.AnalogSignal):
raise TypeError('Input signal is not a neo.AnalogSignal!')
if baseline is None:
pass
    elif baseline == 'mean':
# subtract mean from each channel
signal = signal - signal.mean(axis=0)
    elif baseline == 'median':
# subtract median from each channel
signal = signal - np.median(signal.as_quantity(), axis=0)
elif isinstance(baseline, pq.Quantity):
# subtract arbitrary baseline
signal = signal - baseline
else:
raise TypeError(
'baseline must be None, \'mean\', \'median\', '
'or a Quantity: {}'.format(baseline))
# slice the signal after subtracting baseline
signal = signal.time_slice(t_start, t_stop)
if bin_duration is not None:
# from bin duration, determine samples per bin and number of bins
if isinstance(bin_duration, pq.Quantity):
samples_per_bin = int(np.round(
bin_duration.rescale('s')/signal.sampling_period.rescale('s')))
n_bins = int(np.ceil(signal.shape[0]/samples_per_bin))
else:
raise TypeError(
'bin_duration must be a Quantity: {}'.format(bin_duration))
else:
# all samples in one bin
samples_per_bin = signal.shape[0]
n_bins = 1
# store the actual bin duration
bin_duration = samples_per_bin * signal.sampling_period.rescale('s')
# reshape into equal size bins, padding the end with zeros if necessary
n_channels = signal.shape[1]
sig_binned = signal.as_quantity().copy()
sig_binned.resize(n_bins * samples_per_bin, n_channels)
sig_binned = sig_binned.reshape(n_bins, samples_per_bin, n_channels)
# rectify and integrate over each bin
rauc = np.trapz(np.abs(sig_binned), dx=signal.sampling_period, axis=1)
if n_bins == 1:
# return a single value for each channel
return rauc.squeeze()
else:
# return an AnalogSignal with times corresponding to center of each bin
rauc_sig = neo.AnalogSignal(
rauc,
t_start=signal.t_start.rescale('s')+bin_duration/2,
sampling_period=bin_duration,
)
return rauc_sig
|
def rauc(signal, baseline=None, bin_duration=None, t_start=None, t_stop=None):
'''
Calculate the rectified area under the curve (RAUC) for an AnalogSignal.
The signal is optionally divided into bins with duration `bin_duration`,
and the rectified signal (absolute value) is integrated within each bin to
find the area under the curve. The mean or median of the signal or an
arbitrary baseline may optionally be subtracted before rectification. If
the number of bins is 1 (default), a single value is returned for each
channel in the input signal. Otherwise, an AnalogSignal containing the
values for each bin is returned along with the times of the centers of the
bins.
Parameters
----------
signal : neo.AnalogSignal
The signal to integrate. If `signal` contains more than one channel,
each is integrated separately.
bin_duration : quantities.Quantity
The length of time that each integration should span. If None, there
will be only one bin spanning the entire signal duration. If
`bin_duration` does not divide evenly into the signal duration, the end
        of the signal is padded with zeros to accommodate the final,
overextending bin.
Default: None
baseline : string or quantities.Quantity
A factor to subtract from the signal before rectification. If `'mean'`
or `'median'`, the mean or median value of the entire signal is
subtracted on a channel-by-channel basis.
Default: None
t_start, t_stop : quantities.Quantity
Times to start and end the algorithm. The signal is cropped using
`signal.time_slice(t_start, t_stop)` after baseline removal. Useful if
you want the RAUC for a short section of the signal but want the
mean or median calculation (`baseline='mean'` or `baseline='median'`)
to use the entire signal for better baseline estimation.
Default: None
Returns
-------
quantities.Quantity or neo.AnalogSignal
If the number of bins is 1, the returned object is a scalar or
vector Quantity containing a single RAUC value for each channel.
Otherwise, the returned object is an AnalogSignal containing the
RAUC(s) for each bin stored as a sample, with times corresponding to
the center of each bin. The output signal will have the same number
of channels as the input signal.
Raises
------
TypeError
If the input signal is not a neo.AnalogSignal.
TypeError
        If `bin_duration` is neither None nor a Quantity.
TypeError
If `baseline` is not None, `'mean'`, `'median'`, or a Quantity.
'''
if not isinstance(signal, neo.AnalogSignal):
raise TypeError('Input signal is not a neo.AnalogSignal!')
if baseline is None:
pass
    elif baseline == 'mean':
# subtract mean from each channel
signal = signal - signal.mean(axis=0)
    elif baseline == 'median':
# subtract median from each channel
signal = signal - np.median(signal.as_quantity(), axis=0)
elif isinstance(baseline, pq.Quantity):
# subtract arbitrary baseline
signal = signal - baseline
else:
raise TypeError(
'baseline must be None, \'mean\', \'median\', '
'or a Quantity: {}'.format(baseline))
# slice the signal after subtracting baseline
signal = signal.time_slice(t_start, t_stop)
if bin_duration is not None:
# from bin duration, determine samples per bin and number of bins
if isinstance(bin_duration, pq.Quantity):
samples_per_bin = int(np.round(
bin_duration.rescale('s')/signal.sampling_period.rescale('s')))
n_bins = int(np.ceil(signal.shape[0]/samples_per_bin))
else:
raise TypeError(
'bin_duration must be a Quantity: {}'.format(bin_duration))
else:
# all samples in one bin
samples_per_bin = signal.shape[0]
n_bins = 1
# store the actual bin duration
bin_duration = samples_per_bin * signal.sampling_period.rescale('s')
# reshape into equal size bins, padding the end with zeros if necessary
n_channels = signal.shape[1]
sig_binned = signal.as_quantity().copy()
sig_binned.resize(n_bins * samples_per_bin, n_channels)
sig_binned = sig_binned.reshape(n_bins, samples_per_bin, n_channels)
# rectify and integrate over each bin
rauc = np.trapz(np.abs(sig_binned), dx=signal.sampling_period, axis=1)
if n_bins == 1:
# return a single value for each channel
return rauc.squeeze()
else:
# return an AnalogSignal with times corresponding to center of each bin
rauc_sig = neo.AnalogSignal(
rauc,
t_start=signal.t_start.rescale('s')+bin_duration/2,
sampling_period=bin_duration,
)
return rauc_sig
|
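A short usage sketch for rauc() above, assuming the function is importable (it matches elephant.signal_processing.rauc): build a one-channel AnalogSignal and compute both a single RAUC value and a binned RAUC signal.

import numpy as np
import quantities as pq
import neo
from elephant.signal_processing import rauc  # assumed import location

fs = 1000 * pq.Hz
t = np.arange(0, 10, 0.001)  # 10 s sampled at 1 kHz
sig = neo.AnalogSignal((np.sin(2 * np.pi * 5 * t) + 0.5)[:, np.newaxis],
                       units='mV', sampling_rate=fs)

total = rauc(sig, baseline='mean')                          # one Quantity per channel
binned = rauc(sig, baseline='mean', bin_duration=1 * pq.s)  # AnalogSignal, one sample per 1 s bin
print(total, binned.shape)  # binned.shape -> (10, 1)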