id (int64, 11-59.9k) | original (string, 33-150k chars) | modified (string, 37-150k chars)
---|---|---
1,360 |
def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples in each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for j in [0, 1]:
if merge[j] < n_samples:
current_count += 1
else:
current_count += counts[merge[j] - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
|
def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples in each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for child_idx in merge:
if child_idx < n_samples:
current_count += 1
else:
current_count += counts[child_idx - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
|
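A hedged usage sketch for the helper in this row, following the scikit-learn dendrogram example it appears to be based on. The toy dataset, the AgglomerativeClustering settings, and the truncate_mode arguments are illustrative assumptions; note that model.distances_ is only populated when the full tree is computed (e.g. with distance_threshold=0).

```python
# Illustrative sketch only: assumes plot_dendrogram from the row above is in scope.
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering

X = np.random.RandomState(0).rand(30, 2)  # toy data
# distance_threshold=0 with n_clusters=None builds the full tree,
# which is what fills in model.distances_ for the helper.
model = AgglomerativeClustering(distance_threshold=0, n_clusters=None).fit(X)
plot_dendrogram(model, truncate_mode="level", p=3)
plt.show()
```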
57,557 |
def _main():
commit_hash = sys.argv[1]
pr_number = get_pr_number(commit_hash)
if not pr_number:
return
merger, labels = get_pr_merger_and_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
print(f"""Hi @{merger}
You merged this PR, but the category labels are missing.
Please add a primary label ({_get_formatted(PRIMARY_LABELS)}) and a secondary label ({_get_formatted(SECONDARY_LABELS)}).
""") # noqa: E501
|
def _main():
commit_hash = sys.argv[1]
pr_number = get_pr_number(commit_hash)
if not pr_number:
return
merger, labels = get_pr_merger_and_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
print(f"""Hi @{merger}
You merged this PR, but one or more labels are missing.
Please include a primary label ({_get_formatted(PRIMARY_LABELS)}) and a secondary label ({_get_formatted(SECONDARY_LABELS)}).
""") # noqa: E501
|
57,999 |
def fetch_incidents(client: Client):
max_results = arg_to_number(arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False)
first_fetch_time = arg_to_datetime(demisto.params().get('first_fetch')).isoformat()
last_run = demisto.getLastRun()
last_fetch = last_run.get('last_fetch', first_fetch_time)
incidentsList=[]
alert_response = client.correlation_alerts()
incident_data = alert_response['Data']
for inc in incident_data:
if len(incidentsList) > max_results:
break
incident_name = inc['CorrelationAlert']['NAME']
time_stamp = inc['CorrelationAlert']['CREATEDATE']+"Z"
severityLvl = int(inc['CorrelationAlert']['RISK'])
if severityLvl >=0 and severityLvl <= 5:
severity = 1
elif severityLvl > 5 and severityLvl <= 7:
severity = 2
elif severityLvl > 7 and severityLvl <= 9:
severity = 3
elif severityLvl > 9 and severityLvl <= 10:
severity = 4
else:
severity = 0
# "log" column is stringfyed 'Log' data.
inc['Log'].pop("log")
incidentObject = {**inc['Log'], **inc['CorrelationAlert']}
incident = {
'name': incident_name,
'occurred': time_stamp,
'rawJSON': json.dumps(incidentObject),
"severity": severity,
'type': 'Crpyotsim CorrelationAlert'
}
incidentsList.append(incident)
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': last_fetch}
return next_run, incidentsList
|
def fetch_incidents(client: Client, params):
max_results = arg_to_number(arg=params.get('max_fetch'), arg_name='max_fetch', required=False)
first_fetch_time = arg_to_datetime(params.get('first_fetch')).isoformat()
last_run = demisto.getLastRun()
last_fetch = last_run.get('last_fetch', first_fetch_time)
incidentsList=[]
alert_response = client.correlation_alerts()
incident_data = alert_response['Data']
for inc in incident_data:
if len(incidentsList) > max_results:
break
incident_name = inc['CorrelationAlert']['NAME']
time_stamp = inc['CorrelationAlert']['CREATEDATE']+"Z"
severityLvl = int(inc['CorrelationAlert']['RISK'])
if severityLvl >=0 and severityLvl <= 5:
severity = 1
elif severityLvl > 5 and severityLvl <= 7:
severity = 2
elif severityLvl > 7 and severityLvl <= 9:
severity = 3
elif severityLvl > 9 and severityLvl <= 10:
severity = 4
else:
severity = 0
# "log" column is stringfyed 'Log' data.
inc['Log'].pop("log")
incidentObject = {**inc['Log'], **inc['CorrelationAlert']}
incident = {
'name': incident_name,
'occurred': time_stamp,
'rawJSON': json.dumps(incidentObject),
"severity": severity,
'type': 'Crpyotsim CorrelationAlert'
}
incidentsList.append(incident)
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': last_fetch}
return next_run, incidentsList
|
5,376 |
def test_present():
"""
Test to verify that the specified host is known by the specified user.
"""
name = "github.com"
user = "root"
key = "16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48"
fingerprint = [key]
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
with patch.dict(ssh_known_hosts.__opts__, {"test": True}):
with patch.object(os.path, "isabs", MagicMock(return_value=False)):
comt = 'If not specifying a "user", ' 'specify an absolute "config".'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name) == ret
comt = 'Specify either "key" or "fingerprint", not both.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key, fingerprint=[key]) == ret
comt = 'Required argument "enc" if using "key" argument.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key) == ret
mock = MagicMock(side_effect=["exists", "add", "update"])
with patch.dict(ssh_known_hosts.__salt__, {"ssh.check_known_host": mock}):
comt = "Host github.com is already in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be" " added to .ssh/known_hosts"
ret.update({"comment": comt, "result": None})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be " "updated in .ssh/known_hosts"
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
with patch.dict(ssh_known_hosts.__opts__, {"test": False}):
result = {"status": "exists", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "github.com already exists in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
result = {"status": "error", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
ret.update({"comment": "", "result": False})
assert ssh_known_hosts.present(name, user) == ret
result = {
"status": "updated",
"error": "",
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "{}'s key saved to .ssh/known_hosts (key: {})".format(name, key)
ret.update(
{
"comment": comt,
"result": True,
"changes": {
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
},
}
)
assert ssh_known_hosts.present(name, user, key=key) == ret
comt = "{}'s key saved to .ssh/known_hosts (fingerprint: {})".format(
name, fingerprint
)
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
|
def test_present():
"""
Test to verify that the specified host is known by the specified user.
"""
name = "github.com"
user = "root"
key = "16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48"
fingerprint = [key]
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
with patch.dict(ssh_known_hosts.__opts__, {"test": True}):
with patch.object(os.path, "isabs", MagicMock(return_value=False)):
comt = 'If not specifying a "user", ' 'specify an absolute "config".'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name) == ret
comt = 'Specify either "key" or "fingerprint", not both.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key, fingerprint=[key]) == ret
comt = 'Required argument "enc" if using "key" argument.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key) == ret
mock = MagicMock(side_effect=["exists", "add", "update"])
with patch.dict(ssh_known_hosts.__salt__, {"ssh.check_known_host": mock}):
comt = "Host github.com is already in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be" " added to .ssh/known_hosts"
ret.update({"comment": comt, "result": None})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be updated in .ssh/known_hosts"
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
with patch.dict(ssh_known_hosts.__opts__, {"test": False}):
result = {"status": "exists", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "github.com already exists in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
result = {"status": "error", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
ret.update({"comment": "", "result": False})
assert ssh_known_hosts.present(name, user) == ret
result = {
"status": "updated",
"error": "",
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "{}'s key saved to .ssh/known_hosts (key: {})".format(name, key)
ret.update(
{
"comment": comt,
"result": True,
"changes": {
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
},
}
)
assert ssh_known_hosts.present(name, user, key=key) == ret
comt = "{}'s key saved to .ssh/known_hosts (fingerprint: {})".format(
name, fingerprint
)
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
|
32,302 |
def filter_incidents_by_duplicates_and_limit(incidents_res, last_run, fetch_limit, id_field):
"""
Removes duplicate incidents from response and returns the incidents till limit.
The function should be called after getting the get-incidents API response,
and by passing the id_field it will filter out the incidents that were already fetched
by checking the incident IDs that are saved from the previous fetch in the last run object
:type incidents_res: ``list``
:param incidents_res: The incidents from the API response
:type last_run: ``dict``
:param last_run: The LastRun object
:type fetch_limit: ``int``
:param fetch_limit: The incidents limit to return
:type id_field: ``str``
:param id_field: The incident id field
:return: List of incidents after filtering duplicates when len(incidents) <= limit
:rtype: ``list``
"""
found_incidents = last_run.get('found_incident_ids', {})
incidents = []
for incident in incidents_res:
if incident[id_field] not in found_incidents:
incidents.append(incident)
return incidents[:fetch_limit]
|
def filter_incidents_by_duplicates_and_limit(incidents_res, last_run, fetch_limit, id_field):
"""
Removes duplicate incidents from response and returns the incidents till limit.
The function should be called in case using look-back greater than zero after getting the get-incidents API response,
and by passing the id_field it will filter out the incidents that were already fetched
by checking the incident IDs that are saved from the previous fetch in the last run object
:type incidents_res: ``list``
:param incidents_res: The incidents from the API response
:type last_run: ``dict``
:param last_run: The LastRun object
:type fetch_limit: ``int``
:param fetch_limit: The incidents limit to return
:type id_field: ``str``
:param id_field: The incident id field
:return: List of incidents after filtering duplicates when len(incidents) <= limit
:rtype: ``list``
"""
found_incidents = last_run.get('found_incident_ids', {})
incidents = []
for incident in incidents_res:
if incident[id_field] not in found_incidents:
incidents.append(incident)
return incidents[:fetch_limit]
|
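A toy invocation of the de-duplication helper above; the incident dicts, the id field name, and the last-run contents are made up purely for illustration.

```python
# Hypothetical data, purely for illustration; assumes the helper above is in scope.
last_run = {'found_incident_ids': {'inc-1': 1680000000}}
incidents_res = [
    {'incident_id': 'inc-1', 'name': 'already fetched last time'},
    {'incident_id': 'inc-2', 'name': 'new incident'},
    {'incident_id': 'inc-3', 'name': 'another new incident'},
]
# 'inc-1' is dropped as a duplicate, then the limit keeps only the first remaining result.
new_incidents = filter_incidents_by_duplicates_and_limit(
    incidents_res, last_run, fetch_limit=1, id_field='incident_id')
# -> [{'incident_id': 'inc-2', 'name': 'new incident'}]
```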
33,652 |
def _process_observations(base_env, policies, batch_builder_pool,
active_episodes, unfiltered_obs, rewards, dones,
infos, off_policy_actions, horizon, preprocessors,
obs_filters, rollout_fragment_length, pack,
callbacks, soft_horizon, no_done_at_end):
"""Record new data from the environment and prepare for policy evaluation.
Returns:
active_envs: set of non-terminated env ids
to_eval: map of policy_id to list of agent PolicyEvalData
outputs: list of metrics and samples to return from the sampler
"""
active_envs = set()
to_eval = defaultdict(list)
outputs = []
large_batch_threshold = max(1000, rollout_fragment_length * 10) if \
rollout_fragment_length != float("inf") else 5000
# For each environment
for env_id, agent_obs in unfiltered_obs.items():
new_episode = env_id not in active_episodes
episode = active_episodes[env_id]
if not new_episode:
episode.length += 1
episode.batch_builder.count += 1
episode._add_agent_rewards(rewards[env_id])
if (episode.batch_builder.total() > large_batch_threshold
and log_once("large_batch_warning")):
logger.warning(
"More than {} observations for {} env steps ".format(
episode.batch_builder.total(),
episode.batch_builder.count) + "are buffered in "
"the sampler. If this is more than you expected, check that "
"that you set a horizon on your environment correctly and that"
" it terminates at some point. "
"Note: In multi-agent environments, `rollout_fragment_length` "
"sets the batch size based on environment steps, not the "
"steps of "
"individual agents, which can result in unexpectedly large "
"batches. Also, you may be in evaluation waiting for your Env "
"to terminate (batch_mode=`complete_episodes`). Make sure it "
"does at some point.")
# Check episode termination conditions
if dones[env_id]["__all__"] or episode.length >= horizon:
hit_horizon = (episode.length >= horizon
and not dones[env_id]["__all__"])
all_done = True
atari_metrics = _fetch_atari_metrics(base_env)
if atari_metrics is not None:
for m in atari_metrics:
outputs.append(
m._replace(custom_metrics=episode.custom_metrics))
else:
outputs.append(
RolloutMetrics(episode.length, episode.total_reward,
dict(episode.agent_rewards),
episode.custom_metrics, {},
episode.hist_data))
else:
hit_horizon = False
all_done = False
active_envs.add(env_id)
# For each agent in the environment.
for agent_id, raw_obs in agent_obs.items():
policy_id = episode.policy_for(agent_id)
prep_obs = _get_or_raise(preprocessors,
policy_id).transform(raw_obs)
if log_once("prep_obs"):
logger.info("Preprocessed obs: {}".format(summarize(prep_obs)))
filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs)
if log_once("filtered_obs"):
logger.info("Filtered obs: {}".format(summarize(filtered_obs)))
agent_done = bool(all_done or dones[env_id].get(agent_id))
if not agent_done:
to_eval[policy_id].append(
PolicyEvalData(env_id, agent_id, filtered_obs,
infos[env_id].get(agent_id, {}),
episode.rnn_state_for(agent_id),
episode.last_action_for(agent_id),
rewards[env_id][agent_id] or 0.0))
last_observation = episode.last_observation_for(agent_id)
episode._set_last_observation(agent_id, filtered_obs)
episode._set_last_raw_obs(agent_id, raw_obs)
episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))
# Record transition info if applicable
if (last_observation is not None and infos[env_id].get(
agent_id, {}).get("training_enabled", True)):
episode.batch_builder.add_values(
agent_id,
policy_id,
t=episode.length - 1,
eps_id=episode.episode_id,
agent_index=episode._agent_index(agent_id),
obs=last_observation,
actions=episode.last_action_for(agent_id),
rewards=rewards[env_id][agent_id],
prev_actions=episode.prev_action_for(agent_id),
prev_rewards=episode.prev_reward_for(agent_id),
dones=(False if (no_done_at_end
or (hit_horizon and soft_horizon)) else
agent_done),
infos=infos[env_id].get(agent_id, {}),
new_obs=filtered_obs,
**episode.last_pi_info_for(agent_id))
# Invoke the step callback after the step is logged to the episode
if callbacks.get("on_episode_step"):
callbacks["on_episode_step"]({"env": base_env, "episode": episode})
# Cut the batch if we're not packing multiple episodes into one,
# or if we've exceeded the requested batch size.
if episode.batch_builder.has_pending_agent_data():
if dones[env_id]["__all__"] and not no_done_at_end:
episode.batch_builder.check_missing_dones()
if (all_done and not pack) or \
episode.batch_builder.count >= rollout_fragment_length:
outputs.append(episode.batch_builder.build_and_reset(episode))
elif all_done:
# Make sure postprocessor stays within one episode
episode.batch_builder.postprocess_batch_so_far(episode)
if all_done:
# Handle episode termination
batch_builder_pool.append(episode.batch_builder)
# Call each policy's Exploration.on_episode_end method.
for p in policies.values():
p.exploration.on_episode_end(
policy=p,
environment=base_env,
episode=episode,
tf_sess=getattr(p, "_sess", None))
# Call custom on_episode_end callback.
if callbacks.get("on_episode_end"):
callbacks["on_episode_end"]({
"env": base_env,
"policy": policies,
"episode": episode
})
if hit_horizon and soft_horizon:
episode.soft_reset()
resetted_obs = agent_obs
else:
del active_episodes[env_id]
resetted_obs = base_env.try_reset(env_id)
if resetted_obs is None:
# Reset not supported, drop this env from the ready list
if horizon != float("inf"):
raise ValueError(
"Setting episode horizon requires reset() support "
"from the environment.")
elif resetted_obs != ASYNC_RESET_RETURN:
# Creates a new episode if this is not async return
# If reset is async, we will get its result in some future poll
episode = active_episodes[env_id]
for agent_id, raw_obs in resetted_obs.items():
policy_id = episode.policy_for(agent_id)
policy = _get_or_raise(policies, policy_id)
prep_obs = _get_or_raise(preprocessors,
policy_id).transform(raw_obs)
filtered_obs = _get_or_raise(obs_filters,
policy_id)(prep_obs)
episode._set_last_observation(agent_id, filtered_obs)
to_eval[policy_id].append(
PolicyEvalData(
env_id, agent_id, filtered_obs,
episode.last_info_for(agent_id) or {},
episode.rnn_state_for(agent_id),
np.zeros_like(
_flatten_action(policy.action_space.sample())),
0.0))
return active_envs, to_eval, outputs
|
def _process_observations(base_env, policies, batch_builder_pool,
active_episodes, unfiltered_obs, rewards, dones,
infos, off_policy_actions, horizon, preprocessors,
obs_filters, rollout_fragment_length, pack,
callbacks, soft_horizon, no_done_at_end):
"""Record new data from the environment and prepare for policy evaluation.
Returns:
active_envs: set of non-terminated env ids
to_eval: map of policy_id to list of agent PolicyEvalData
outputs: list of metrics and samples to return from the sampler
"""
active_envs = set()
to_eval = defaultdict(list)
outputs = []
large_batch_threshold = max(1000, rollout_fragment_length * 10) if \
rollout_fragment_length != float("inf") else 5000
# For each environment
for env_id, agent_obs in unfiltered_obs.items():
new_episode = env_id not in active_episodes
episode = active_episodes[env_id]
if not new_episode:
episode.length += 1
episode.batch_builder.count += 1
episode._add_agent_rewards(rewards[env_id])
if (episode.batch_builder.total() > large_batch_threshold
and log_once("large_batch_warning")):
logger.warning(
"More than {} observations for {} env steps ".format(
episode.batch_builder.total(),
episode.batch_builder.count) + "are buffered in "
"the sampler. If this is more than you expected, check that "
"that you set a horizon on your environment correctly and that"
" it terminates at some point. "
"Note: In multi-agent environments, `rollout_fragment_length` "
"sets the batch size based on environment steps, not the "
"steps of "
"individual agents, which can result in unexpectedly large "
"batches. Also, you may be in evaluation waiting for your Env "
"to terminate (batch_mode=`complete_episodes`). Make sure it "
"does at some point.")
# Check episode termination conditions
if dones[env_id]["__all__"] or episode.length >= horizon:
hit_horizon = (episode.length >= horizon
and not dones[env_id]["__all__"])
all_done = True
atari_metrics = _fetch_atari_metrics(base_env)
if atari_metrics is not None:
for m in atari_metrics:
outputs.append(
m._replace(custom_metrics=episode.custom_metrics))
else:
outputs.append(
RolloutMetrics(episode.length, episode.total_reward,
dict(episode.agent_rewards),
episode.custom_metrics, {},
episode.hist_data))
else:
hit_horizon = False
all_done = False
active_envs.add(env_id)
# For each agent in the environment.
for agent_id, raw_obs in agent_obs.items():
policy_id = episode.policy_for(agent_id)
prep_obs = _get_or_raise(preprocessors,
policy_id).transform(raw_obs)
if log_once("prep_obs"):
logger.info("Preprocessed obs: {}".format(summarize(prep_obs)))
filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs)
if log_once("filtered_obs"):
logger.info("Filtered obs: {}".format(summarize(filtered_obs)))
agent_done = bool(all_done or dones[env_id].get(agent_id))
if not agent_done:
to_eval[policy_id].append(
PolicyEvalData(env_id, agent_id, filtered_obs,
infos[env_id].get(agent_id, {}),
episode.rnn_state_for(agent_id),
episode.last_action_for(agent_id),
rewards[env_id][agent_id] or 0.0))
last_observation = episode.last_observation_for(agent_id)
episode._set_last_observation(agent_id, filtered_obs)
episode._set_last_raw_obs(agent_id, raw_obs)
episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))
# Record transition info if applicable
if (last_observation is not None and infos[env_id].get(
agent_id, {}).get("training_enabled", True)):
episode.batch_builder.add_values(
agent_id,
policy_id,
t=episode.length - 1,
eps_id=episode.episode_id,
agent_index=episode._agent_index(agent_id),
obs=last_observation,
actions=episode.last_action_for(agent_id),
rewards=rewards[env_id][agent_id],
prev_actions=episode.prev_action_for(agent_id),
prev_rewards=episode.prev_reward_for(agent_id),
dones=(False if (no_done_at_end
or (hit_horizon and soft_horizon)) else
agent_done),
infos=infos[env_id].get(agent_id, {}),
new_obs=filtered_obs,
**episode.last_pi_info_for(agent_id))
# Invoke the step callback after the step is logged to the episode
if callbacks.get("on_episode_step"):
callbacks["on_episode_step"]({"env": base_env, "episode": episode})
# Cut the batch if we're not packing multiple episodes into one,
# or if we've exceeded the requested batch size.
if episode.batch_builder.has_pending_agent_data():
if dones[env_id]["__all__"] and not no_done_at_end:
episode.batch_builder.check_missing_dones()
if (all_done and not pack) or \
episode.batch_builder.count >= rollout_fragment_length:
outputs.append(episode.batch_builder.build_and_reset(episode))
elif all_done:
# Make sure postprocessor stays within one episode
episode.batch_builder.postprocess_batch_so_far(episode)
if all_done:
# Handle episode termination
batch_builder_pool.append(episode.batch_builder)
# Call each policy's Exploration.on_episode_end method.
for p in policies.values():
p.exploration.on_episode_end(
policy=p,
environment=base_env,
episode=episode,
tf_sess=getattr(p, "_sess", None))
# Call custom on_episode_end callback.
if callbacks.get("on_episode_end"):
callbacks["on_episode_end"]({
"env": base_env,
"policy": policies,
"episode": episode
})
if hit_horizon and soft_horizon:
episode.soft_reset()
resetted_obs = agent_obs
else:
del active_episodes[env_id]
resetted_obs = base_env.try_reset(env_id)
if resetted_obs is None:
# Reset not supported, drop this env from the ready list
if horizon != float("inf"):
raise ValueError(
"Setting episode horizon requires reset() support "
"from the environment.")
elif resetted_obs != ASYNC_RESET_RETURN:
# Creates a new episode if this is not async return
# If reset is async, we will get its result in some future poll
episode = active_episodes[env_id]
for agent_id, raw_obs in resetted_obs.items():
policy_id = episode.policy_for(agent_id)
policy = _get_or_raise(policies, policy_id)
prep_obs = _get_or_raise(preprocessors,
policy_id).transform(raw_obs)
filtered_obs = _get_or_raise(obs_filters,
policy_id)(prep_obs)
episode._set_last_observation(agent_id, filtered_obs)
to_eval[policy_id].append(
PolicyEvalData(
env_id, agent_id, filtered_obs,
episode.last_info_for(agent_id) or {},
episode.rnn_state_for(agent_id),
np.zeros_like(
_flatten_action(policy.action_space.sample())),
0.0))
return active_envs, to_eval, outputs
|
27,298 |
def test_rows_ambiguous():
with pytest.raises(
ValueError,
match="Both columns and schema are not None",
):
ibis.table(
data=[(1,)],
schema=ibis.schema(dict(a="int8")),
columns=["a"],
)
|
def test_table_literal_ambiguous():
with pytest.raises(
ValueError,
match="Both columns and schema are not None",
):
ibis.table(
data=[(1,)],
schema=ibis.schema(dict(a="int8")),
columns=["a"],
)
|
48,482 |
def sign_manifest(signature_path, manifest_path, module, collection_setup_result):
collection_setup_result['gpg_detach_sign'] = {'signature_path': signature_path}
status_fd_read, status_fd_write = os.pipe()
gpg_cmd = [
"gpg",
"--batch",
"--pinentry-mode",
"loopback",
"--yes",
# "--passphrase",
# "SECRET",
"--homedir",
module.params['signature_dir'],
"--detach-sign",
"--armor",
"--output",
signature_path,
manifest_path,
]
try:
p = subprocess.Popen(
gpg_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
pass_fds=(status_fd_write,),
encoding='utf8',
)
except (FileNotFoundError, subprocess.SubprocessError) as err:
collection_setup_result['gpg_detach_sign']['error'] = "Failed during GnuPG verification with command '{gpg_cmd}': {err}".format(
gpg_cmd=gpg_cmd, err=err
)
else:
stdout, stderr = p.communicate()
collection_setup_result['gpg_detach_sign']['stdout'] = stdout
if stderr:
error = "Failed during GnuPG verification with command '{gpg_cmd}':\n{stderr}".format(gpg_cmd=gpg_cmd, stderr=stderr)
collection_setup_result['gpg_detach_sign']['error'] = error
finally:
os.close(status_fd_write)
|
def sign_manifest(signature_path, manifest_path, module, collection_setup_result):
collection_setup_result['gpg_detach_sign'] = {'signature_path': signature_path}
status_fd_read, status_fd_write = os.pipe()
gpg_cmd = [
"gpg",
"--batch",
"--pinentry-mode",
"loopback",
"--yes",
"--homedir",
module.params['signature_dir'],
"--detach-sign",
"--armor",
"--output",
signature_path,
manifest_path,
]
try:
p = subprocess.Popen(
gpg_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
pass_fds=(status_fd_write,),
encoding='utf8',
)
except (FileNotFoundError, subprocess.SubprocessError) as err:
collection_setup_result['gpg_detach_sign']['error'] = "Failed during GnuPG verification with command '{gpg_cmd}': {err}".format(
gpg_cmd=gpg_cmd, err=err
)
else:
stdout, stderr = p.communicate()
collection_setup_result['gpg_detach_sign']['stdout'] = stdout
if stderr:
error = "Failed during GnuPG verification with command '{gpg_cmd}':\n{stderr}".format(gpg_cmd=gpg_cmd, stderr=stderr)
collection_setup_result['gpg_detach_sign']['error'] = error
finally:
os.close(status_fd_write)
|
37,245 |
def random_clifford(num_qubits, seed=None):
"""Return a random Clifford operator.
The Clifford is sampled using the method of Reference [1].
Args:
num_qubits (int): the number of qubits for the Clifford
seed (int or np.random.Generator): Optional. Set a fixed seed or
generator for RNG.
Returns:
Clifford: a random Clifford operator.
Reference:
1. S. Bravyi and D. Maslov, *Hadamard-free circuits expose the
structure of the Clifford group*.
`arXiv:2003.09412 [quant-ph] <https://arxiv.org/abs/2003.09412>`_
"""
if seed is None:
rng = np.random.default_rng()
elif isinstance(seed, np.random.Generator):
rng = seed
else:
rng = default_rng(seed)
had, perm = _sample_qmallows(num_qubits, rng)
gamma1 = np.diag(rng.integers(2, size=num_qubits, dtype=np.int8))
gamma2 = np.diag(rng.integers(2, size=num_qubits, dtype=np.int8))
delta1 = np.eye(num_qubits, dtype=np.int8)
delta2 = delta1.copy()
_fill_tril(gamma1, rng, symmetric=True)
_fill_tril(gamma2, rng, symmetric=True)
_fill_tril(delta1, rng)
_fill_tril(delta2, rng)
# For large num_qubits numpy.inv function called bellow can
# return invalid output leading to a non-symplectic Clifford
# being generated. This can be prevented by manually forcing
# block inversion of the matrix.
block_inverse_threshold = 50
# Compute stabilizer table
zero = np.zeros((num_qubits, num_qubits), dtype=np.int8)
prod1 = np.matmul(gamma1, delta1) % 2
prod2 = np.matmul(gamma2, delta2) % 2
inv1 = _inverse_tril(delta1, block_inverse_threshold).transpose()
inv2 = _inverse_tril(delta2, block_inverse_threshold).transpose()
table1 = np.block([[delta1, zero], [prod1, inv1]])
table2 = np.block([[delta2, zero], [prod2, inv2]])
# Apply qubit permutation
table = table2[np.concatenate([perm, num_qubits + perm])]
# Apply layer of Hadamards
inds = had * np.arange(1, num_qubits + 1)
inds = inds[inds > 0] - 1
lhs_inds = np.concatenate([inds, inds + num_qubits])
rhs_inds = np.concatenate([inds + num_qubits, inds])
table[lhs_inds, :] = table[rhs_inds, :]
# Apply table
table = np.mod(np.matmul(table1, table), 2).astype(np.bool)
# Generate random phases
phase = rng.integers(2, size=2 * num_qubits).astype(np.bool)
return Clifford(StabilizerTable(table, phase))
|
def random_clifford(num_qubits, seed=None):
"""Return a random Clifford operator.
The Clifford is sampled using the method of Reference [1].
Args:
num_qubits (int): the number of qubits for the Clifford
seed (int or np.random.Generator): Optional. Set a fixed seed or
generator for RNG.
Returns:
Clifford: a random Clifford operator.
Reference:
1. S. Bravyi and D. Maslov, *Hadamard-free circuits expose the
structure of the Clifford group*.
`arXiv:2003.09412 [quant-ph] <https://arxiv.org/abs/2003.09412>`_
"""
if seed is None:
rng = np.random.default_rng()
elif isinstance(seed, np.random.Generator):
rng = seed
else:
rng = default_rng(seed)
had, perm = _sample_qmallows(num_qubits, rng)
gamma1 = np.diag(rng.integers(2, size=num_qubits, dtype=np.int8))
gamma2 = np.diag(rng.integers(2, size=num_qubits, dtype=np.int8))
delta1 = np.eye(num_qubits, dtype=np.int8)
delta2 = delta1.copy()
_fill_tril(gamma1, rng, symmetric=True)
_fill_tril(gamma2, rng, symmetric=True)
_fill_tril(delta1, rng)
_fill_tril(delta2, rng)
# For large num_qubits numpy.inv function called below can
# return invalid output leading to a non-symplectic Clifford
# being generated. This can be prevented by manually forcing
# block inversion of the matrix.
block_inverse_threshold = 50
# Compute stabilizer table
zero = np.zeros((num_qubits, num_qubits), dtype=np.int8)
prod1 = np.matmul(gamma1, delta1) % 2
prod2 = np.matmul(gamma2, delta2) % 2
inv1 = _inverse_tril(delta1, block_inverse_threshold).transpose()
inv2 = _inverse_tril(delta2, block_inverse_threshold).transpose()
table1 = np.block([[delta1, zero], [prod1, inv1]])
table2 = np.block([[delta2, zero], [prod2, inv2]])
# Apply qubit permutation
table = table2[np.concatenate([perm, num_qubits + perm])]
# Apply layer of Hadamards
inds = had * np.arange(1, num_qubits + 1)
inds = inds[inds > 0] - 1
lhs_inds = np.concatenate([inds, inds + num_qubits])
rhs_inds = np.concatenate([inds + num_qubits, inds])
table[lhs_inds, :] = table[rhs_inds, :]
# Apply table
table = np.mod(np.matmul(table1, table), 2).astype(np.bool)
# Generate random phases
phase = rng.integers(2, size=2 * num_qubits).astype(np.bool)
return Clifford(StabilizerTable(table, phase))
|
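A short usage sketch, assuming this is the random_clifford exposed under qiskit.quantum_info; the import path and the to_circuit() call are assumptions about the surrounding library and are not shown in the snippet.

```python
# Hedged usage sketch: import path assumed, not part of the snippet above.
from qiskit.quantum_info import random_clifford

cliff = random_clifford(3, seed=42)  # reproducible 3-qubit Clifford
print(cliff)                         # stabilizer/destabilizer representation
print(cliff.to_circuit())            # decompose into an equivalent circuit
```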
31,505 |
def parse_results(ip: str, raw_result: Dict[str, Any], reliability: DBotScoreReliability) -> List[CommandResults]:
command_results = []
# default values
asn = as_owner = None
feed_related_indicators = []
if raw_result:
hostname = raw_result.get('hostname')
feed_related_indicators.append(
Common.FeedRelatedIndicators(hostname,
FeedIndicatorType.URL if urlRegex.find(hostname)
else FeedIndicatorType.Domain))
if 'asn' in raw_result:
asn = demisto.get(raw_result, 'asn.asn')
as_owner = demisto.get(raw_result, 'asn.name')
as_domain = demisto.get(raw_result, 'asn.domain')
if as_domain:
feed_related_indicators.append(Common.FeedRelatedIndicators(as_domain, FeedIndicatorType.Domain))
elif 'org' in raw_result:
org = raw_result.get('org', '')
org_parts = org.split(' ')
if ' ' in org:
asn, as_owner = org_parts[0], ' '.join(org_parts[1:])
organization = {
'Name': demisto.get(raw_result, 'company.name'),
'Type': demisto.get(raw_result, 'company.type')
} if 'company' in raw_result else None
company_domain = demisto.get(raw_result, 'company.domain')
if company_domain is not None:
feed_related_indicators.append(Common.FeedRelatedIndicators(company_domain, FeedIndicatorType.Domain))
abuse = {
'Address': demisto.get(raw_result, 'abuse.address'),
'Country': demisto.get(raw_result, 'abuse.country'),
'Name': demisto.get(raw_result, 'abuse.name'),
'Network': demisto.get(raw_result, 'abuse.network'),
'Phone': demisto.get(raw_result, 'abuse.phone'),
'Email': demisto.get(raw_result, 'abuse.email')
} if 'abuse' in raw_result else None
tags = []
for (tag_path, tag_name) in (('privacy.hosting', 'hosting'),
('privacy.proxy', 'proxy'),
('privacy.tor', 'tor'),
('privacy.vpn', 'vpn')):
if demisto.get(raw_result, tag_path):
tags.append(tag_name)
city = raw_result.get('city')
region = raw_result.get('region')
postal = raw_result.get('postal')
country = raw_result.get('country')
description = ', '.join(filter(None, [city, region, postal, country]))
# parses geolocation
lat = lon = ''
loc = raw_result.get('loc', '') # empty string as default on purpose,
if ',' in loc:
coordinates = loc.split(',')
lat, lon = float(coordinates[0]), float(coordinates[1])
entry_context = {'Address': raw_result.get('ip'),
'Hostname': hostname,
'ASN': asn,
'ASOwner': as_owner,
'Tags': tags,
'Organization': organization,
'Geo': {'Location': loc, 'Country': country, 'Description': description},
'Registrar': {'Abuse': abuse} if abuse else None}
outputs_key_field = 'Address' # marks the ip address
indicator = Common.IP(
ip=ip,
dbot_score=Common.DBotScore(indicator='ip',
indicator_type=DBotScoreType.IP,
integration_name='IPinfo_v2',
reliability=reliability,
score=Common.DBotScore.NONE),
asn=asn,
hostname=hostname,
feed_related_indicators=feed_related_indicators,
geo_latitude=str(lat) if lat else None,
geo_longitude=str(lon) if lon else None,
geo_description=description or None,
geo_country=country,
tags=','.join(tags))
command_results.append(CommandResults(
readable_output=tableToMarkdown(f'IPinfo results for {ip}', raw_result),
raw_response=raw_result,
outputs_prefix='IPinfo.IP',
outputs=entry_context,
outputs_key_field=outputs_key_field,
indicator=indicator))
if lat and lon:
map_output = CommandResults(raw_response={'lat': lat, 'lng': lon},
entry_type=EntryType.MAP_ENTRY_TYPE,
outputs_key_field=outputs_key_field,
indicator=indicator)
command_results.append(map_output)
return command_results
|
def parse_results(ip: str, raw_result: Dict[str, Any], reliability: DBotScoreReliability) -> List[CommandResults]:
command_results = []
# default values
asn = as_owner = None
feed_related_indicators = []
if raw_result:
hostname = raw_result.get('hostname')
feed_related_indicators.append(
Common.FeedRelatedIndicators(hostname,
FeedIndicatorType.URL if urlRegex.find(hostname)
else FeedIndicatorType.Domain))
if 'asn' in raw_result:
asn = demisto.get(raw_result, 'asn.asn')
as_owner = demisto.get(raw_result, 'asn.name')
as_domain = demisto.get(raw_result, 'asn.domain')
if as_domain:
feed_related_indicators.append(Common.FeedRelatedIndicators(as_domain, FeedIndicatorType.Domain))
elif 'org' in raw_result:
org = raw_result.get('org', '')
org_parts = org.split(' ')
if ' ' in org:
asn, as_owner = org_parts[0], ' '.join(org_parts[1:])
organization = {
'Name': demisto.get(raw_result, 'company.name'),
'Type': demisto.get(raw_result, 'company.type')
} if 'company' in raw_result else None
company_domain = demisto.get(raw_result, 'company.domain')
if company_domain is not None:
feed_related_indicators.append(Common.FeedRelatedIndicators(company_domain, FeedIndicatorType.Domain))
abuse = {
'Address': demisto.get(raw_result, 'abuse.address'),
'Country': demisto.get(raw_result, 'abuse.country'),
'Name': demisto.get(raw_result, 'abuse.name'),
'Network': demisto.get(raw_result, 'abuse.network'),
'Phone': demisto.get(raw_result, 'abuse.phone'),
'Email': demisto.get(raw_result, 'abuse.email')
} if 'abuse' in raw_result else None
tags = []
for (tag_path, tag_name) in (('privacy.hosting', 'hosting'),
('privacy.proxy', 'proxy'),
('privacy.tor', 'tor'),
('privacy.vpn', 'vpn')):
if demisto.get(raw_result, tag_path):
tags.append(tag_name)
city = raw_result.get('city')
region = raw_result.get('region')
postal = raw_result.get('postal')
country = raw_result.get('country')
description = ', '.join(filter(None, [city, region, postal, country]))
# parses geolocation
lat = lon = ''
loc = raw_result.get('loc', '') # empty string as default on purpose,
if ',' in loc:
coordinates = loc.split(',')
lat, lon = float(coordinates[0]), float(coordinates[1])
entry_context = {'Address': raw_result.get('ip'),
'Hostname': hostname,
'ASN': asn,
'ASOwner': as_owner,
'Tags': tags,
'Organization': organization,
'Geo': {'Location': loc, 'Country': country, 'Description': description},
'Registrar': {'Abuse': abuse} if abuse else None}
outputs_key_field = 'Address' # marks the ip address
indicator = Common.IP(
ip=ip,
dbot_score=Common.DBotScore(indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name='IPinfo_v2',
reliability=reliability,
score=Common.DBotScore.NONE),
asn=asn,
hostname=hostname,
feed_related_indicators=feed_related_indicators,
geo_latitude=str(lat) if lat else None,
geo_longitude=str(lon) if lon else None,
geo_description=description or None,
geo_country=country,
tags=','.join(tags))
command_results.append(CommandResults(
readable_output=tableToMarkdown(f'IPinfo results for {ip}', raw_result),
raw_response=raw_result,
outputs_prefix='IPinfo.IP',
outputs=entry_context,
outputs_key_field=outputs_key_field,
indicator=indicator))
if lat and lon:
map_output = CommandResults(raw_response={'lat': lat, 'lng': lon},
entry_type=EntryType.MAP_ENTRY_TYPE,
outputs_key_field=outputs_key_field,
indicator=indicator)
command_results.append(map_output)
return command_results
|
23,071 |
def broadcast_trick(func):
"""
Provide a decorator to wrap common numpy function with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, this reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
Those arrays are not only more efficient locally, but dask serialisation is
aware of the _real_ size of those arrays and thus can send them around
efficiently and schedule accordingly.
Note that those arrays are read-only and numpy will refuse to assign to them,
so they should be safe.
"""
def inner(shape, *args, **kwargs):
return np.broadcast_to(func((), *args, **kwargs), shape)
if func.__doc__ is not None:
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
return inner
|
def broadcast_trick(func):
"""
Provide a decorator to wrap common numpy function with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, thus reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
Those arrays are not only more efficient locally, but dask serialisation is
aware of the _real_ size of those arrays and thus can send them around
efficiently and schedule accordingly.
Note that those arrays are read-only and numpy will refuse to assign to them,
so they should be safe.
"""
def inner(shape, *args, **kwargs):
return np.broadcast_to(func((), *args, **kwargs), shape)
if func.__doc__ is not None:
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
return inner
|
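The docstring's size claim can be checked directly with plain numpy; a minimal sketch (the 8-byte figure assumes the default 64-bit integer dtype).

```python
import numpy as np

x = np.broadcast_to(1, (100, 100, 100))
print(x.shape)        # (100, 100, 100)
print(x.base.nbytes)  # 8 on a 64-bit platform: one scalar backs the whole view
# As the docstring notes, the view is read-only:
# x[0, 0, 0] = 2  would raise "ValueError: assignment destination is read-only"
```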
44,045 |
def generate_paulis(generators, num_qubits):
"""Generate the single qubit Pauli X operators :math:`sigma^{x}_{i}` that will be used for
for generating cliffords for a Hamiltonian :math:`H`.
Args:
generators (list): list of generators of symmetries, taus, for the Hamiltonian.
num_qubits (int): number of wires required to define the Hamiltonian.
Return:
sigma_x (list): the list of support of the single-qubit PauliX operators used to build
the Clifford operators
.. code-block::
>>> symbols, coordinates = (['H', 'H'], np.array([0., 0., -0.66140414, 0., 0., 0.66140414]))
>>> H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates)
>>> generators = generate_symmetries(H, qubits)
>>> generate_clifford(generators, qubits)
[PauliX(wires=[1]), PauliX(wires=[2]), PauliX(wires=[3])]
"""
ops_generator = [g.ops[0] if isinstance(g.ops, list) else g.ops for g in generators]
bmat = _binary_matrix(ops_generator, num_qubits)
sigma_x = []
for row in range(bmat.shape[0]):
bmatrow = bmat[row]
bmatrest = np.delete(bmat, row, axis=0)
for col in range(bmat.shape[1] // 2):
# Anti-commutes with the (row) and commutes with all other symmetries.
if bmatrow[col] and np.array_equal(
bmatrest[:, col], np.zeros(bmat.shape[0] - 1, dtype=int)
):
sigma_x.append(qml.PauliX(col))
break
return sigma_x
|
def generate_paulis(generators, num_qubits):
"""Generate the single qubit Pauli X operators :math:`sigma^{x}_{i}` that will be used for
for generating cliffords for a Hamiltonian :math:`H`.
Args:
generators (list): list of tau symmetry generators for the Hamiltonian.
num_qubits (int): number of wires required to define the Hamiltonian.
Return:
sigma_x (list): the list of support of the single-qubit PauliX operators used to build
the Clifford operators
.. code-block::
>>> symbols, coordinates = (['H', 'H'], np.array([0., 0., -0.66140414, 0., 0., 0.66140414]))
>>> H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates)
>>> generators = generate_symmetries(H, qubits)
>>> generate_clifford(generators, qubits)
[PauliX(wires=[1]), PauliX(wires=[2]), PauliX(wires=[3])]
"""
ops_generator = [g.ops[0] if isinstance(g.ops, list) else g.ops for g in generators]
bmat = _binary_matrix(ops_generator, num_qubits)
sigma_x = []
for row in range(bmat.shape[0]):
bmatrow = bmat[row]
bmatrest = np.delete(bmat, row, axis=0)
for col in range(bmat.shape[1] // 2):
# Anti-commutes with the (row) and commutes with all other symmetries.
if bmatrow[col] and np.array_equal(
bmatrest[:, col], np.zeros(bmat.shape[0] - 1, dtype=int)
):
sigma_x.append(qml.PauliX(col))
break
return sigma_x
|
25,794 |
def _MPTT_descendant_ids_statement(
connection, ContentNodeTable, node_ids, min_boundary, max_boundary
):
"""
This function is modified from:
https://github.com/django-mptt/django-mptt/blob/master/mptt/managers.py#L66
in order to render the result as a SQL Alchemy expression that we can use
in other queries.
"""
# First get the relevant MPTT values from the database for the specified node_ids
# for topic nodes in the specified lft/rght range.
mptt_values = connection.execute(
select(
[
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
ContentNodeTable.c.rght,
]
)
.order_by(
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
)
.where(
and_(
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in nodes that are ancestors of
# the nodes in the range, but they could be ancestors of any node
# in this range, so we filter the lft value by being less than
# or equal to the max_boundary, and the rght value by being
# greater than or equal to the min_boundary.
ContentNodeTable.c.lft <= max_boundary,
ContentNodeTable.c.rght >= min_boundary,
# Only select values for descendant constraints from topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
).fetchall()
# Now we fetch a list of non-topic ids from the specified node ids
# that match the specified tree boundary ranges
non_topic_node_ids = map(
lambda x: x[0],
connection.execute(
select([ContentNodeTable.c.id]).where(
and_(
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in non-topic nodes that
# are inside the range
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
# Produce an id list for non topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
).fetchall(),
)
or_queries = []
# If we have any node ids that are for not topics, then we add an explicit query
# to match against those node ids
if non_topic_node_ids:
or_queries.append(filter_by_uuids(ContentNodeTable.c.id, non_topic_node_ids))
# Group the resultant mptt data by tree_id and parent_id,
# this will allow us to consolidate contiguous siblings to reduce
# the total number of constraints.
# This logic is verbatim from Django MPTT, only the query construction
# has been translated from Django Q statements to SQL Alchemy and_ statements.
for group in groupby(
mptt_values,
key=lambda n: (
# tree id
n[0],
# parent id
n[1],
),
):
next_lft = None
for node in list(group[1]):
tree = node[0]
lft = min_val = node[2]
rght = max_val = node[3]
if next_lft is None:
next_lft = rght + 1
min_max = {"min": min_val, "max": max_val}
elif lft == next_lft:
if min_val < min_max["min"]:
min_max["min"] = min_val
if max_val > min_max["max"]:
min_max["max"] = max_val
next_lft = rght + 1
elif lft != next_lft:
or_queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
min_max = {"min": min_val, "max": max_val}
next_lft = rght + 1
or_queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
if not or_queries:
# No constraints that apply in this range, so therefore this query should always
# evaluate to False, because nothing can match it.
return select([ContentNodeTable.c.id]).where(False == True) # noqa E712
# Return a query that ors each of the constraints
return select([ContentNodeTable.c.id]).where(or_(*or_queries))
|
def _MPTT_descendant_ids_statement(
connection, ContentNodeTable, node_ids, min_boundary, max_boundary
):
"""
This function is modified from:
https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L66
in order to render the result as a SQL Alchemy expression that we can use
in other queries.
"""
# First get the relevant MPTT values from the database for the specified node_ids
# for topic nodes in the specified lft/rght range.
mptt_values = connection.execute(
select(
[
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
ContentNodeTable.c.rght,
]
)
.order_by(
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
)
.where(
and_(
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in nodes that are ancestors of
# the nodes in the range, but they could be ancestors of any node
# in this range, so we filter the lft value by being less than
# or equal to the max_boundary, and the rght value by being
# greater than or equal to the min_boundary.
ContentNodeTable.c.lft <= max_boundary,
ContentNodeTable.c.rght >= min_boundary,
# Only select values for descendant constraints from topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
).fetchall()
# Now we fetch a list of non-topic ids from the specified node ids
# that match the specified tree boundary ranges
non_topic_node_ids = map(
lambda x: x[0],
connection.execute(
select([ContentNodeTable.c.id]).where(
and_(
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in non-topic nodes that
# are inside the range
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
# Produce an id list for non topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
).fetchall(),
)
or_queries = []
# If we have any node ids that are for not topics, then we add an explicit query
# to match against those node ids
if non_topic_node_ids:
or_queries.append(filter_by_uuids(ContentNodeTable.c.id, non_topic_node_ids))
# Group the resultant mptt data by tree_id and parent_id,
# this will allow us to consolidate contiguous siblings to reduce
# the total number of constraints.
# This logic is verbatim from Django MPTT, only the query construction
# has been translated from Django Q statements to SQL Alchemy and_ statements.
for group in groupby(
mptt_values,
key=lambda n: (
# tree id
n[0],
# parent id
n[1],
),
):
next_lft = None
for node in list(group[1]):
tree = node[0]
lft = min_val = node[2]
rght = max_val = node[3]
if next_lft is None:
next_lft = rght + 1
min_max = {"min": min_val, "max": max_val}
elif lft == next_lft:
if min_val < min_max["min"]:
min_max["min"] = min_val
if max_val > min_max["max"]:
min_max["max"] = max_val
next_lft = rght + 1
elif lft != next_lft:
or_queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
min_max = {"min": min_val, "max": max_val}
next_lft = rght + 1
or_queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
if not or_queries:
# No constraints that apply in this range, so therefore this query should always
# evaluate to False, because nothing can match it.
return select([ContentNodeTable.c.id]).where(False == True) # noqa E712
# Return a query that ors each of the constraints
return select([ContentNodeTable.c.id]).where(or_(*or_queries))
|
34,724 |
def add_server_arguments(parser: argparse.ArgumentParser) -> None:
"""Add arguments for running API endpoint."""
parser.add_argument(
"--log-file",
type=str,
# Rasa should not log to a file by default, otherwise there will be problems
# when running on OpenShift
default=None,
help="Store logs in specified file.",
)
parser.add_argument(
"--log-rotating",
type=str,
# Rasa should not rotaing log file by default
default=None,
help="""Handler for logging to a file, rotating the log file at certain timed intervals.
supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
"""
)
add_endpoint_param(
parser,
help_text="Configuration file for the model server and the connectors as a "
"yml file.",
)
server_arguments = parser.add_argument_group("Server Settings")
add_port_argument(server_arguments)
server_arguments.add_argument(
"-t",
"--auth-token",
type=str,
help="Enable token based authentication. Requests need to provide "
"the token to be accepted.",
)
server_arguments.add_argument(
"--cors",
nargs="*",
type=str,
help="Enable CORS for the passed origin. Use * to whitelist all origins.",
)
server_arguments.add_argument(
"--enable-api",
action="store_true",
help="Start the web server API in addition to the input channel.",
)
server_arguments.add_argument(
"--response-timeout",
default=constants.DEFAULT_RESPONSE_TIMEOUT,
type=int,
help="Maximum time a response can take to process (sec).",
)
server_arguments.add_argument(
"--remote-storage",
help="Set the remote location where your Rasa model is stored, e.g. on AWS.",
)
server_arguments.add_argument(
"--ssl-certificate",
help="Set the SSL Certificate to create a TLS secured server.",
)
server_arguments.add_argument(
"--ssl-keyfile", help="Set the SSL Keyfile to create a TLS secured server."
)
server_arguments.add_argument(
"--ssl-ca-file",
help="If your SSL certificate needs to be verified, you can specify the CA file "
"using this parameter.",
)
server_arguments.add_argument(
"--ssl-password",
help="If your ssl-keyfile is protected by a password, you can specify it "
"using this paramer.",
)
channel_arguments = parser.add_argument_group("Channels")
channel_arguments.add_argument(
"--credentials",
default=None,
help="Authentication credentials for the connector as a yml file.",
)
channel_arguments.add_argument(
"--connector", type=str, help="Service to connect to."
)
jwt_auth = parser.add_argument_group("JWT Authentication")
jwt_auth.add_argument(
"--jwt-secret",
type=str,
help="Public key for asymmetric JWT methods or shared secret"
"for symmetric methods. Please also make sure to use "
"--jwt-method to select the method of the signature, "
"otherwise this argument will be ignored."
"Note that this key is meant for securing the HTTP API.",
)
jwt_auth.add_argument(
"--jwt-method",
type=str,
default="HS256",
help="Method used for the signature of the JWT authentication payload.",
)
|
def add_server_arguments(parser: argparse.ArgumentParser) -> None:
"""Add arguments for running API endpoint."""
parser.add_argument(
"--log-file",
type=str,
# Rasa should not log to a file by default, otherwise there will be problems
# when running on OpenShift
default=None,
help="Store logs in specified file.",
)
parser.add_argument(
"--log-rotating",
type=str,
# Rasa should not rotate log files by default
default=None,
help="""Handler for logging to a file, rotating the log file at certain timed intervals.
supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
"""
)
add_endpoint_param(
parser,
help_text="Configuration file for the model server and the connectors as a "
"yml file.",
)
server_arguments = parser.add_argument_group("Server Settings")
add_port_argument(server_arguments)
server_arguments.add_argument(
"-t",
"--auth-token",
type=str,
help="Enable token based authentication. Requests need to provide "
"the token to be accepted.",
)
server_arguments.add_argument(
"--cors",
nargs="*",
type=str,
help="Enable CORS for the passed origin. Use * to whitelist all origins.",
)
server_arguments.add_argument(
"--enable-api",
action="store_true",
help="Start the web server API in addition to the input channel.",
)
server_arguments.add_argument(
"--response-timeout",
default=constants.DEFAULT_RESPONSE_TIMEOUT,
type=int,
help="Maximum time a response can take to process (sec).",
)
server_arguments.add_argument(
"--remote-storage",
help="Set the remote location where your Rasa model is stored, e.g. on AWS.",
)
server_arguments.add_argument(
"--ssl-certificate",
help="Set the SSL Certificate to create a TLS secured server.",
)
server_arguments.add_argument(
"--ssl-keyfile", help="Set the SSL Keyfile to create a TLS secured server."
)
server_arguments.add_argument(
"--ssl-ca-file",
help="If your SSL certificate needs to be verified, you can specify the CA file "
"using this parameter.",
)
server_arguments.add_argument(
"--ssl-password",
help="If your ssl-keyfile is protected by a password, you can specify it "
"using this paramer.",
)
channel_arguments = parser.add_argument_group("Channels")
channel_arguments.add_argument(
"--credentials",
default=None,
help="Authentication credentials for the connector as a yml file.",
)
channel_arguments.add_argument(
"--connector", type=str, help="Service to connect to."
)
jwt_auth = parser.add_argument_group("JWT Authentication")
jwt_auth.add_argument(
"--jwt-secret",
type=str,
help="Public key for asymmetric JWT methods or shared secret"
"for symmetric methods. Please also make sure to use "
"--jwt-method to select the method of the signature, "
"otherwise this argument will be ignored."
"Note that this key is meant for securing the HTTP API.",
)
jwt_auth.add_argument(
"--jwt-method",
type=str,
default="HS256",
help="Method used for the signature of the JWT authentication payload.",
)
|
58,412 |
def benchmark(n):
a={'list': [1,2,3,43], 't': (1,2,3), 'str': 'hello', 'subdict': {'a': True}}
dc=A('hello', [1,2,3], True)
for ii in range(n):
for jj in range(60):
_ = copy.deepcopy(a)
for s in ['red', 'blue', 'green']:
dc.string = s
for ii in range(10):
dc.lst[0] = ii
for b in [True, False]:
dc.boolean=b
_ = copy.deepcopy(dc)
|
def benchmark(n):
a = {
'list': [1,2,3,43],
't': (1,2,3),
'str': 'hello',
'subdict': {'a': True},
}
dc=A('hello', [1,2,3], True)
for ii in range(n):
for jj in range(60):
_ = copy.deepcopy(a)
for s in ['red', 'blue', 'green']:
dc.string = s
for ii in range(10):
dc.lst[0] = ii
for b in [True, False]:
dc.boolean=b
_ = copy.deepcopy(dc)
|
47,502 |
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
"""
Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is
meant as a workaround for the following issue (https://github.com/tensorflow/tensorflow/issues/55682), and will be
    removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and it relies on the fact that
softmax(x) = softmax(x + c) (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).
Args:
logits (`tf.Tensor`). Must be one of the following types: half, float32, float64.
axis (`int`, *optional*). The dimension softmax would be performed on. The default is -1 which indicates the
last dimension.
name (`str`, *optional*). A name for the operation (optional).
Returns:
`tf.Tensor`: A Tensor. Has the same type and shape as logits.
"""
return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
|
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
"""
Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is
meant as a workaround for the following issue (https://github.com/tensorflow/tensorflow/issues/55682), and will be
    removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and it relies on the fact that
softmax(x) = softmax(x + c) (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).
Args:
logits (`tf.Tensor`). Must be one of the following types: half, float32, float64.
axis (`int`, *optional*). The dimension softmax would be performed on. The default is -1 which indicates the
last dimension.
name (`str`, *optional*). A name for the operation.
Returns:
`tf.Tensor`: A Tensor. Has the same type and shape as logits.
"""
return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
|
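The wrapper above only works because softmax is shift invariant; a minimal NumPy sketch (independent of TensorFlow, with all names below chosen just for illustration) makes that property concrete:

import numpy as np

def softmax_np(x, axis=-1):
    # subtracting the per-row max is itself an application of softmax(x) == softmax(x + c)
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=axis, keepdims=True)

logits = np.array([[1.0, 2.0, 3.0], [0.5, 0.5, 0.5]])
assert np.allclose(softmax_np(logits), softmax_np(logits + 1e-9))  # the tiny offset changes nothing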
8,436 |
def _compute_line_flux(spectrum, regions=None):
if regions is not None:
calc_spectrum = extract_region(spectrum, regions)
else:
calc_spectrum = spectrum
# Average dispersion in the line region
avg_dx = (np.abs(np.diff(calc_spectrum.spectral_axis))).quantity
# Account for the existence of a mask.
if hasattr(spectrum, 'mask') and spectrum.mask is not None:
# Cannot use unmasked values because of average dispersion.
# Masked values must enter sum calculation valued as zeros.
flux_ = np.where(calc_spectrum.mask, 0, calc_spectrum.flux)[1:]
else:
flux_ = calc_spectrum.flux[1:]
line_flux = np.sum(flux_ * avg_dx)
# TODO: we may want to consider converting to erg / cm^2 / sec by default
return line_flux
|
def _compute_line_flux(spectrum, regions=None):
if regions is not None:
calc_spectrum = extract_region(spectrum, regions)
else:
calc_spectrum = spectrum
# Average dispersion in the line region
avg_dx = (np.abs(np.diff(calc_spectrum.spectral_axis))).quantity
# Account for the existence of a mask.
if hasattr(spectrum, 'mask') and spectrum.mask is not None:
# Cannot use unmasked values because of average dispersion.
# Masked values must enter sum calculation valued as zeros.
flux_ = np.where(calc_spectrum.mask, 0, calc_spectrum.flux)[1:]
else:
flux = calc_spectrum.flux[1:]
line_flux = np.sum(flux_ * avg_dx)
# TODO: we may want to consider converting to erg / cm^2 / sec by default
return line_flux
|
49,876 |
def dc_ohmic_losses(ohms, current):
"""
    Returns ohmic losses in units of power from the equivalent
    resistance of the wires and the operating current.
Parameters
----------
ohms: numeric, float
current: numeric, float or array-like
Returns
----------
numeric
Single or array-like value of the losses in units of power
References
----------
-- [1] PVsyst 7 Help. "Array ohmic wiring loss".
https://www.pvsyst.com/help/ohmic_loss.htm
"""
return ohms * current * current
|
def dc_ohmic_losses(ohms, current):
"""
    Returns ohmic losses in units of power from the equivalent
    resistance of the wires and the operating current.
Parameters
----------
ohms: numeric, float
current: numeric, float or array-like
Returns
----------
loss: numeric
Power loss [W]
References
----------
-- [1] PVsyst 7 Help. "Array ohmic wiring loss".
https://www.pvsyst.com/help/ohmic_loss.htm
"""
return ohms * current * current
|
49,107 |
def find(x, equation):
"""
Checks whether a Symbol matching ``x`` is present in ``equation``
or not. If present, the matching symbol is returned, else a
ValueError is raised. If ``x`` is a string the matching symbol
will have the same name; if ``x`` is a Symbol then it will be
returned if found.
Examples
========
>>> from sympy.geometry.util import find
>>> from sympy import Dummy
>>> from sympy.abc import x
>>> find('x', x)
x
>>> find('x', Dummy('x'))
_x
>>> Dummy('x').name == 'x'
True
>>> find(x, Dummy('x'))
Traceback (most recent call last):
...
ValueError: could not find x
"""
free = equation.free_symbols
xs = [i for i in free if (i.name if isinstance(x, str) else i) == x]
if not xs:
raise ValueError('could not find %s' % x)
if len(xs) != 1:
raise ValueError('ambiguous %s' % x)
return xs[0]
|
def find(x, equation):
"""
Checks whether a Symbol matching ``x`` is present in ``equation``
or not. If present, the matching symbol is returned, else a
ValueError is raised. If ``x`` is a string the matching symbol
will have the same name; if ``x`` is a Symbol then it will be
returned if found.
Examples
========
>>> from sympy.geometry.util import find
>>> from sympy import Dummy
>>> from sympy.abc import x
>>> find('x', x)
x
>>> find('x', Dummy('x'))
_x
The dummy symbol is returned since it has a matching name:
>>> _.name == 'x'
True
>>> find(x, Dummy('x'))
Traceback (most recent call last):
...
ValueError: could not find x
"""
free = equation.free_symbols
xs = [i for i in free if (i.name if isinstance(x, str) else i) == x]
if not xs:
raise ValueError('could not find %s' % x)
if len(xs) != 1:
raise ValueError('ambiguous %s' % x)
return xs[0]
|
32,399 |
def get_feed_collection(client):
"""
Test the integration connection state
:param client: instance of client to communicate with server
:return: list of collection names
"""
collections = client.get_services()
command_results = CommandResults(
outputs_prefix='CybleIntel.collection',
outputs_key_field='names',
outputs=collections
)
return command_results
|
def get_feed_collection(client: Client):
"""
    Get the names of the feed collections available from the service
:param client: instance of client to communicate with server
:return: list of collection names
"""
collections = client.get_services()
command_results = CommandResults(
outputs_prefix='CybleIntel.collection',
outputs_key_field='names',
outputs=collections
)
return command_results
|
38,382 |
def validate_field_key(key):
if (
isinstance(key, tuple)
and len(key) == 2
and all(isinstance(_, str) for _ in key)
):
return
raise TypeError(f"Expected a 2-tuple of strings, received invalid field key: {key}")
|
def validate_field_key(key):
if (
isinstance(key, tuple)
and len(key) == 2
and all(isinstance(_, str) for _ in key)
):
return
raise TypeError(f"Expected a tuple of (field type, field name), received invalid field key: {key}")
|
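For illustration, a few hedged examples of keys that pass or fail this check (the tuples are made up for the sketch and assume `validate_field_key` from the snippet above is in scope):

validate_field_key(("string", "title"))       # passes: a 2-tuple of strings, returns None
try:
    validate_field_key(["string", "title"])   # fails: a list is not a tuple
except TypeError as err:
    print(err)
try:
    validate_field_key(("string", 42))        # fails: second element is not a string
except TypeError as err:
    print(err)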
42,006 |
def _interpolate_zmap(
zmap: Dict[complex, Union[int, float]], contour_plot_num: int
) -> Dict[complex, Union[int, float]]:
# implements interpolation algorithm used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
max_fractional_delta = 1.0
empties = _find_coordinates_where_empty(zmap, contour_plot_num)
# one pass to fill in a starting value for all the empties
zmap, _ = _run_iteration(zmap, empties)
for _ in range(NUM_OPTIMIZATION_ITERATIONS):
if max_fractional_delta > FRACTIONAL_DELTA_THRESHOLD:
# correct for overshoot and run again
max_fractional_delta = 0.5 - 0.25 * min(1, max_fractional_delta * 0.5)
zmatrix, max_fractional_delta = _run_iteration(zmap, empties, max_fractional_delta)
else:
break
return zmap
|
def _interpolate_zmap(
zmap: Dict[complex, Union[int, float]], contour_plot_num: int
) -> Dict[complex, Union[int, float]]:
# implements interpolation algorithm used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
max_fractional_delta = 1.0
empties = _find_coordinates_where_empty(zmap, contour_plot_num)
# one pass to fill in a starting value for all the empties
zmap, _ = _run_iteration(zmap, empties)
for _ in range(NUM_OPTIMIZATION_ITERATIONS):
if max_fractional_delta > FRACTIONAL_DELTA_THRESHOLD:
# correct for overshoot and run again
max_fractional_delta = 0.5 - 0.25 * min(1, max_fractional_delta * 0.5)
max_fractional_delta = _run_iteration(zmap, empties, max_fractional_delta)
else:
break
return zmap
|
45,865 |
def rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix: rotation matrix.
Returns:
Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(rotation_matrix)}")
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(f"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}")
axis = torch.zeros((rotation_matrix.shape[0], 3), device=rotation_matrix.device)
axis[:, 0] = rotation_matrix[:, 2, 1] - rotation_matrix[:, 1, 2]
axis[:, 1] = rotation_matrix[:, 0, 2] - rotation_matrix[:, 2, 0]
axis[:, 2] = rotation_matrix[:, 1, 0] - rotation_matrix[:, 0, 1]
# add epsilon for numerical stability
r = torch.norm(axis, dim=1).unsqueeze(1) + eps
t = rotation_matrix[:, 0, 0] + rotation_matrix[:, 1, 1] + rotation_matrix[:, 2, 2]
t = t.unsqueeze(1)
# use atan2 instead of torch.acos((t - 1)/2) for numerical stability
theta = torch.atan2(r, t - 1)
axis = axis / r
angle_axis = theta * axis
return angle_axis
|
def rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix: rotation matrix.
Returns:
Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(rotation_matrix)}")
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(f"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}")
axis = torch.zeros((rotation_matrix.shape[0], 3), device=rotation_matrix.device)
axis[:, 0] = rotation_matrix[:, 2, 1] - rotation_matrix[:, 1, 2]
axis[:, 1] = rotation_matrix[:, 0, 2] - rotation_matrix[:, 2, 0]
axis[:, 2] = rotation_matrix[:, 1, 0] - rotation_matrix[:, 0, 1]
# add epsilon for numerical stability
r = torch.norm(axis, dim=1).unsqueeze(1) + eps
t = rotation_matrix[:, 0, 0] + rotation_matrix[:, 1, 1] + rotation_matrix[:, 2, 2]
t = t.unsqueeze(1)
# use atan2 instead of torch.acos((t - 1)/2) for numerical stability
theta = torch.atan2(r, t - 1)
axis = axis / (r + eps)
angle_axis = theta * axis
return angle_axis
|
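A quick numerical sanity check (a sketch assuming `rotation_matrix_to_angle_axis` from the snippet above is importable): a rotation of 0.3 rad about the z-axis should map back to the Rodrigues vector (0, 0, 0.3):

import math
import torch

theta = 0.3
rot_z = torch.tensor([[[math.cos(theta), -math.sin(theta), 0.0],
                       [math.sin(theta),  math.cos(theta), 0.0],
                       [0.0,              0.0,             1.0]]])  # shape (1, 3, 3)
angle_axis = rotation_matrix_to_angle_axis(rot_z)
assert torch.allclose(angle_axis, torch.tensor([[0.0, 0.0, theta]]), atol=1e-4)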
44,066 |
def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
|
def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
|
35,579 |
def draw_bounding_boxes(
image: torch.Tensor,
boxes: torch.Tensor,
labels: torch.Tensor,
label_names: List[int] = None,
colors: Dict[int, str] = None,
draw_labels: bool = True,
width: int = 1
) -> torch.Tensor:
"""
Draws bounding boxes on given image.
Args:
image (Tensor): Tensor of shape (C x H x W)
        boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format.
labels (Tensor): Tensor of size (N) Labels for each bounding boxes.
label_names (List): List containing labels excluding background.
colors (dict): Dict with key as label id and value as color name.
draw_labels (bool): If True draws label names on bounding boxes.
width (int): Width of bounding box.
"""
# Code co-contributed by sumanthratna
# Currently works for (C x H x W) images, but I think we should extend.
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
if not (torch.is_tensor(image)):
raise TypeError('tensor expected, got {}'.format(type(image)))
if label_names is not None:
# Since for our detection models class 0 is background
label_names.insert(0, "__background__")
ndarr = image.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    # Necessary check since FRCNN returns boxes which have grad enabled.
if(boxes.requires_grad):
boxes = boxes.detach()
boxes = boxes.to('cpu').numpy().astype('int').tolist()
labels = labels.to('cpu').numpy().astype('int').tolist()
img_to_draw = Image.fromarray(ndarr)
draw = ImageDraw.Draw(img_to_draw)
for bbox, label in zip(boxes, labels):
if colors is None:
draw.rectangle(bbox, width=width)
else:
draw.rectangle(bbox, width=width, outline=colors[label])
if label_names is None:
draw.text((bbox[0], bbox[1]), str(label))
else:
if draw_labels is True:
draw.text((bbox[0], bbox[1]), label_names[int(label)])
return torch.from_numpy(np.array(img_to_draw))
|
def draw_bounding_boxes(
image: torch.Tensor,
boxes: torch.Tensor,
labels: torch.Tensor,
label_names: List[int] = None,
colors: Dict[int, str] = None,
draw_labels: bool = True,
width: int = 1
) -> torch.Tensor:
"""
Draws bounding boxes on given image.
Args:
image (Tensor): Tensor of shape (C x H x W)
        boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format.
labels (Tensor): Tensor of size (N) Labels for each bounding boxes.
label_names (List): List containing labels excluding background.
colors (dict): Dict with key as label id and value as color name.
draw_labels (bool): If True (default) draws label names on bounding boxes.
width (int): Width of bounding box.
"""
# Code co-contributed by sumanthratna
# Currently works for (C x H x W) images, but I think we should extend.
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
if not (torch.is_tensor(image)):
raise TypeError('tensor expected, got {}'.format(type(image)))
if label_names is not None:
# Since for our detection models class 0 is background
label_names.insert(0, "__background__")
ndarr = image.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    # Necessary check since FRCNN returns boxes which have grad enabled.
if(boxes.requires_grad):
boxes = boxes.detach()
boxes = boxes.to('cpu').numpy().astype('int').tolist()
labels = labels.to('cpu').numpy().astype('int').tolist()
img_to_draw = Image.fromarray(ndarr)
draw = ImageDraw.Draw(img_to_draw)
for bbox, label in zip(boxes, labels):
if colors is None:
draw.rectangle(bbox, width=width)
else:
draw.rectangle(bbox, width=width, outline=colors[label])
if label_names is None:
draw.text((bbox[0], bbox[1]), str(label))
else:
if draw_labels is True:
draw.text((bbox[0], bbox[1]), label_names[int(label)])
return torch.from_numpy(np.array(img_to_draw))
|
15,088 |
def _categorize_programs(hass: HomeAssistantType, programs: dict) -> None:
"""Categorize the ISY994 programs."""
for platform in SUPPORTED_PROGRAM_PLATFORMS:
try:
folder = programs[KEY_MY_PROGRAMS][f"HA.{platform}"]
except KeyError:
pass
else:
for dtype, _, node_id in folder.children:
if dtype != KEY_FOLDER:
continue
entity_folder = folder[node_id]
try:
status = entity_folder[KEY_STATUS]
assert status.dtype == "program", "Not a program"
if platform != BINARY_SENSOR:
actions = entity_folder[KEY_ACTIONS]
assert actions.dtype == "program", "Not a program"
else:
actions = None
except (AttributeError, KeyError, AssertionError):
_LOGGER.warning(
"Program entity '%s' not loaded due "
"to invalid folder structure.",
entity_folder.name,
)
continue
entity = (entity_folder.name, status, actions)
hass.data[ISY994_PROGRAMS][platform].append(entity)
|
def _categorize_programs(hass: HomeAssistantType, programs: dict) -> None:
"""Categorize the ISY994 programs."""
for platform in SUPPORTED_PROGRAM_PLATFORMS:
try:
folder = programs[KEY_MY_PROGRAMS][f"HA.{platform}"]
except KeyError:
continue
else:
for dtype, _, node_id in folder.children:
if dtype != KEY_FOLDER:
continue
entity_folder = folder[node_id]
try:
status = entity_folder[KEY_STATUS]
assert status.dtype == "program", "Not a program"
if platform != BINARY_SENSOR:
actions = entity_folder[KEY_ACTIONS]
assert actions.dtype == "program", "Not a program"
else:
actions = None
except (AttributeError, KeyError, AssertionError):
_LOGGER.warning(
"Program entity '%s' not loaded due "
"to invalid folder structure.",
entity_folder.name,
)
continue
entity = (entity_folder.name, status, actions)
hass.data[ISY994_PROGRAMS][platform].append(entity)
|
43,615 |
def sd_excitations(n_electrons, n_orbitals, delta_sz):
r"""Generates single and double excitations from a Hartree-Fock (HF) reference state
The singly- and doubly-excited configurations are generated by acting with the operators
:math:`\hat T_1` and :math:`\hat T_2` on the HF state:
.. math:
&& \vert \Phi_\mathrm{S} \rangle = \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in
\mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}
\rangle \\
&& \vert \Phi_\mathrm{D} \rangle = \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in
\mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle
where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied,
referred to as virtual (virt), molecular orbitals and :math:`\hat c` and
:math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively.
**Example usage:**
>>> ph, pphh = sd_configs(2, 4, 0)
>>> print(ph)
[[0, 2], [1, 3]]
>>> print(pphh)
[[0, 1, 2, 3]]
Args:
n_electrons (int): number of active electrons
n_orbitals (int): number of active orbitals
delta_sz (int): spin-projection selection rule.
For single excitations ``sz[p] - sz[r] = delta_sz``.
            For double excitations ``sz[p] + sz[q] - sz[r] - sz[s] = delta_sz``.
``sz`` is the single-particle state spin quantum number and ``delta_sz``, in the
case of singles and doubles, can take the values :math:`0`, :math:`\pm 1`
and :math:`\pm 2`.
Returns:
tuple(list, list): lists with the indices of the molecular orbitals
involved in the single and double excitations
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be greater than 0 \n"
"Got n_electrons = {}".format(n_electrons)
)
if n_orbitals <= n_electrons:
raise ValueError(
"The number of active orbitals ({}) "
"has to be greater than the number of active electrons ({})."
.format(n_orbitals, n_electrons)
)
if int(delta_sz) not in (0, 1, -1, 2, -2):
raise ValueError(
"Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."
.format(delta_sz)
)
# define the spin quantum number 'sz' of each orbital
sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_orbitals)])
# nested list with the indices 'p, r' for each 1particle-1hole (ph) configuration
ph = [
[r, p]
for r in range(n_electrons) for p in range(n_electrons, n_orbitals)
if sz[p]-sz[r] == delta_sz
]
# nested list with the indices 's, r, q, p' for each 2particle-2hole (pphh) configuration
pphh = [
[s, r, q, p]
for s in range(n_electrons-1) for r in range(s+1, n_electrons)
for q in range(n_electrons, n_orbitals-1) for p in range(q+1, n_orbitals)
if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz
]
return ph, pphh
|
def sd_excitations(n_electrons, n_orbitals, delta_sz):
r"""Generates single and double excitations from a Hartree-Fock (HF) reference state
The singly- and doubly-excited configurations are generated by acting with the operators
:math:`\hat T_1` and :math:`\hat T_2` on the HF state:
.. math:
&& \vert \Phi_\mathrm{S} \rangle = \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in
\mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}
\rangle \\
&& \vert \Phi_\mathrm{D} \rangle = \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in
\mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle
where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied,
referred to as virtual (virt), molecular orbitals and :math:`\hat c` and
:math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively.
**Example**
>>> ph, pphh = sd_configs(2, 4, 0)
>>> print(ph)
[[0, 2], [1, 3]]
>>> print(pphh)
[[0, 1, 2, 3]]
Args:
n_electrons (int): number of active electrons
n_orbitals (int): number of active orbitals
delta_sz (int): spin-projection selection rule.
For single excitations ``sz[p] - sz[r] = delta_sz``.
            For double excitations ``sz[p] + sz[q] - sz[r] - sz[s] = delta_sz``.
``sz`` is the single-particle state spin quantum number and ``delta_sz``, in the
case of singles and doubles, can take the values :math:`0`, :math:`\pm 1`
and :math:`\pm 2`.
Returns:
tuple(list, list): lists with the indices of the molecular orbitals
involved in the single and double excitations
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be greater than 0 \n"
"Got n_electrons = {}".format(n_electrons)
)
if n_orbitals <= n_electrons:
raise ValueError(
"The number of active orbitals ({}) "
"has to be greater than the number of active electrons ({})."
.format(n_orbitals, n_electrons)
)
if int(delta_sz) not in (0, 1, -1, 2, -2):
raise ValueError(
"Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."
.format(delta_sz)
)
# define the spin quantum number 'sz' of each orbital
sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_orbitals)])
# nested list with the indices 'p, r' for each 1particle-1hole (ph) configuration
ph = [
[r, p]
for r in range(n_electrons) for p in range(n_electrons, n_orbitals)
if sz[p]-sz[r] == delta_sz
]
# nested list with the indices 's, r, q, p' for each 2particle-2hole (pphh) configuration
pphh = [
[s, r, q, p]
for s in range(n_electrons-1) for r in range(s+1, n_electrons)
for q in range(n_electrons, n_orbitals-1) for p in range(q+1, n_orbitals)
if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz
]
return ph, pphh
|
30,430 |
def build_misp_complex_filter(demisto_query: str):
"""
Args:
demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:'
using ',' as delimiter for parameters and ';' as delimiter for operators.
using the operators is optional.
            if 'demisto_query' does not contain any of the complex operators, the original
input will be returned
Returns:
        dict: dictionary created for misp to perform complex query,
            or if no complex query is found, returns the original input
Example:
demisto_query should look like:
example 1: "AND:param1,param2;OR:param3;NOT:param4,param5"
example 2: "NOT:param3,param5"
example 3 (simple syntax): "param1,param2"
"""
regexAnd = r"(AND:)([^\;]+)(;)"
regexOr = r"(OR:)([^\;]+)(;)"
regexNot = r"(NOT:)([^\;]+)(;)"
andList = None
orList = None
notList = None
isComplexSearch = False
matchAnd = re.search(regexAnd, demisto_query, re.MULTILINE)
matchOr = re.search(regexOr, demisto_query, re.MULTILINE)
matchNot = re.search(regexNot, demisto_query, re.MULTILINE)
if matchAnd is not None:
andList = matchAnd.group(2).split(',')
isComplexSearch = True
if matchOr is not None:
orList = matchOr.group(2).split(',')
isComplexSearch = True
if matchNot is not None:
notList = matchNot.group(2).split(',')
isComplexSearch = True
if isComplexSearch:
misp_complex_query = MISP.build_complex_query(
or_parameters = orList,
and_parameters = andList,
not_parameters = notList)
return misp_complex_query
return demisto_query
|
def build_misp_complex_filter(demisto_query: str):
"""
Args:
demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:'
using ',' as delimiter for parameters and ';' as delimiter for operators.
using the operators is optional.
            if 'demisto_query' does not contain any of the complex operators, the original
input will be returned
Returns:
        dict: dictionary created for misp to perform complex query,
            or if no complex query is found, returns the original input
Example:
demisto_query should look like:
example 1: "AND:param1,param2;OR:param3;NOT:param4,param5"
example 2: "NOT:param3,param5"
example 3 (simple syntax): "param1,param2"
"""
regexAnd = r'(AND:)([^\;]+)(;)'
regexOr = r"(OR:)([^\;]+)(;)"
regexNot = r"(NOT:)([^\;]+)(;)"
andList = None
orList = None
notList = None
isComplexSearch = False
matchAnd = re.search(regexAnd, demisto_query, re.MULTILINE)
matchOr = re.search(regexOr, demisto_query, re.MULTILINE)
matchNot = re.search(regexNot, demisto_query, re.MULTILINE)
if matchAnd is not None:
andList = matchAnd.group(2).split(',')
isComplexSearch = True
if matchOr is not None:
orList = matchOr.group(2).split(',')
isComplexSearch = True
if matchNot is not None:
notList = matchNot.group(2).split(',')
isComplexSearch = True
if isComplexSearch:
misp_complex_query = MISP.build_complex_query(
or_parameters = orList,
and_parameters = andList,
not_parameters = notList)
return misp_complex_query
return demisto_query
|
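To see how the three patterns carve up the documented syntax, here is a small standalone sketch (the regexes are re-declared locally and the parameter names are placeholders); note that each clause has to end with ';' for these `(;)`-terminated patterns to match, which is why a trailing semicolon is appended to example 1 from the docstring:

import re

query = "AND:param1,param2;OR:param3;NOT:param4,param5;"
regex_and = r"(AND:)([^\;]+)(;)"
regex_or = r"(OR:)([^\;]+)(;)"
regex_not = r"(NOT:)([^\;]+)(;)"

print(re.search(regex_and, query).group(2).split(','))  # ['param1', 'param2']
print(re.search(regex_or, query).group(2).split(','))   # ['param3']
print(re.search(regex_not, query).group(2).split(','))  # ['param4', 'param5']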
38,340 |
def get_filenames_from_glob_pattern(pattern):
epattern = os.path.expanduser(pattern)
data_dir = ytcfg.get("yt", "test_data_dir")
file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern))
if not file_list:
raise FileNotFoundError("No file matched this pattern '%s'" % pattern)
return sorted(file_list)
|
def get_filenames_from_glob_pattern(pattern):
epattern = os.path.expanduser(pattern)
data_dir = ytcfg.get("yt", "test_data_dir")
file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern))
if not file_list:
raise FileNotFoundError("No file matched this pattern or pattern(s) {}".format(pattern))
return sorted(file_list)
|
44,195 |
def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
        T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
def factorize(two, tol):
r"""Return the double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
        T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
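The defining identity V_ijkl = sum_r L_ij^(r) L_kl^(r) is easy to check numerically. The sketch below builds a small synthetic tensor with the required symmetry from two random symmetric matrices and verifies the reconstruction; it assumes `factorize` from the snippet above is in scope and uses made-up shapes and a fixed seed:

import numpy as np

n = 2
rng = np.random.default_rng(0)
mats = [rng.normal(size=(n, n)) for _ in range(2)]
mats = [(m + m.T) / 2 for m in mats]                     # symmetric n x n building blocks
two = sum(np.einsum('ij,kl->ijkl', m, m) for m in mats)  # V_ijkl = sum_r L_ij^(r) L_kl^(r)

factors, _, _ = factorize(two, tol=1e-8)
reconstructed = np.einsum('rij,rkl->ijkl', factors, factors)
assert np.allclose(reconstructed, two)  # the recovered factors reproduce the tensor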
2,939 |
def UnicodeWriter(
f: TextIO, dialect: Type[csv.excel] = csv.excel, encoding: str = "utf-8", **kwds
):
return csv.writer(f, dialect=dialect, **kwds)
|
def UnicodeWriter(
f: TextIO, dialect: Type[csv.Dialect] = csv.excel, encoding: str = "utf-8", **kwds
):
return csv.writer(f, dialect=dialect, **kwds)
|
1,190 |
def load(filename, **kwargs):
''' Load file given filename, guessing at file type
Parameters
----------
filename : string
specification of file to load
\*\*kwargs : keyword arguments
Keyword arguments to format-specific load
Returns
-------
img : ``SpatialImage``
Image of guessed type
'''
try:
stat_result = os.stat(filename)
except OSError:
raise FileNotFoundError("No such file or no access: '%s'" % filename)
if stat_result.st_size <= 0:
raise ImageFileError("Empty file: '%s'" % filename)
sniff = None
for image_klass in all_image_classes:
is_valid, sniff = image_klass.path_maybe_image(filename, sniff)
if is_valid:
if image_klass is PARRECImage and '.REC' in filename:
                # a .REC file can have either a .PAR or .xml header.
                # This skip case assumes PARRECImage is before XMLRECImage in
# all_image_classes.
par_exists = os.path.exists(filename.replace('.REC', '.PAR'))
xml_exists = os.path.exists(filename.replace('.REC', '.xml'))
if not par_exists and xml_exists:
continue # skip trying .PAR and proceed to .xml
print(image_klass)
img = image_klass.from_filename(filename, **kwargs)
return img
raise ImageFileError('Cannot work out file type of "%s"' %
filename)
|
def load(filename, **kwargs):
''' Load file given filename, guessing at file type
Parameters
----------
filename : string
specification of file to load
\*\*kwargs : keyword arguments
Keyword arguments to format-specific load
Returns
-------
img : ``SpatialImage``
Image of guessed type
'''
try:
stat_result = os.stat(filename)
except OSError:
raise FileNotFoundError("No such file or no access: '%s'" % filename)
if stat_result.st_size <= 0:
raise ImageFileError("Empty file: '%s'" % filename)
sniff = None
for image_klass in all_image_classes:
is_valid, sniff = image_klass.path_maybe_image(filename, sniff)
if is_valid:
if image_klass is PARRECImage and filename.endswith('.REC'):
                # a .REC file can have either a .PAR or .xml header.
                # This skip case assumes PARRECImage is before XMLRECImage in
# all_image_classes.
par_exists = os.path.exists(filename.replace('.REC', '.PAR'))
xml_exists = os.path.exists(filename.replace('.REC', '.xml'))
if not par_exists and xml_exists:
continue # skip trying .PAR and proceed to .xml
print(image_klass)
img = image_klass.from_filename(filename, **kwargs)
return img
raise ImageFileError('Cannot work out file type of "%s"' %
filename)
|
24,754 |
def _has_different_parameters(
original: List[astroid.AssignName],
overridden: List[astroid.AssignName],
dummy_parameter_regex: Pattern,
counter: int,
):
result = []
zipped = zip_longest(original, overridden)
for original_param, overridden_param in zipped:
params = (original_param, overridden_param)
if not all(params):
return ["Number of parameters has changed in"]
# check for the arguments' type
original_type = original_param.parent.annotations[counter]
if original_type is not None:
original_type = str(original_param.parent.annotations[counter].name)
overridden_type = overridden_param.parent.annotations[counter]
if overridden_type is not None:
overridden_type = str(overridden_param.parent.annotations[counter].name)
if original_type != overridden_type:
result.append(
"Parameter '"
+ str(original_param.name)
+ "' was of type '"
+ original_type
+ "' and is now of type '"
+ overridden_type
+ "' in"
)
counter += 1
# check for the arguments' name
names = [param.name for param in params]
if any(dummy_parameter_regex.match(name) for name in names):
continue
if original_param.name != overridden_param.name:
result.append(
"Parameter '" + str(original_param.name) + "' has been renamed in"
)
return result
|
def _has_different_parameters(
original: List[astroid.AssignName],
overridden: List[astroid.AssignName],
dummy_parameter_regex: Pattern,
counter: int,
):
result = []
zipped = zip_longest(original, overridden)
for original_param, overridden_param in zipped:
params = (original_param, overridden_param)
if not all(params):
return ["Number of parameters has changed in"]
# check for the arguments' type
original_type = original_param.parent.annotations[counter]
if original_type is not None:
original_type = str(original_param.parent.annotations[counter].name)
overridden_type = overridden_param.parent.annotations[counter]
if overridden_type is not None:
overridden_type = str(overridden_param.parent.annotations[counter].name)
if original_type != overridden_type:
result.append(
"Parameter '"
+ str(original_param.name)
+ "' was of type '"
+ original_type
+ "' and is now of type '"
+ overridden_type
+ "' in"
)
counter += 1
# check for the arguments' name
names = [param.name for param in params]
if any(dummy_parameter_regex.match(name) for name in names):
continue
if original_param.name != overridden_param.name:
result.append(
f"Parameter '{original_param.name}' has been renamed to '{overridden_param.name}' in"
)
return result
|
7,440 |
def _center(x, oshape):
"""Return an array of oshape from the center of x.
"""
start = (np.array(x.shape) - np.array(oshape)) // 2 + 1
out = x[tuple(slice(s, s + n) for s, n in zip(start, oshape))]
return out
|
def _center(x, oshape):
"""Return an array of shape ``oshape`` from the center of array ``x``.
"""
start = (np.array(x.shape) - np.array(oshape)) // 2 + 1
out = x[tuple(slice(s, s + n) for s, n in zip(start, oshape))]
return out
|
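For reference, a worked example of the indexing above (a sketch; `_center` from the snippet is assumed to be in scope). Note that the `+ 1` places the window one element past the usual floor((N - n) / 2) centring, which is worth keeping in mind when comparing against other center-crop helpers:

import numpy as np

x = np.arange(36).reshape(6, 6)
# start = (6 - 2) // 2 + 1 = 3 on each axis, so the slice taken is x[3:5, 3:5]
print(_center(x, (2, 2)))
# [[21 22]
#  [27 28]]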
8,648 |
def find_config(config_dir, name, extension='.cfg'):
"""Build the absolute path for the given configuration file ``name``
:param str config_dir: path to the configuration directory
:param str name: configuration file ``name``
:param str extension: configuration file's extension (default to ``.cfg``)
:return: the path of the configuration file, either in the current
directory or from the ``config_dir`` directory
This function tries different location:
* the current directory,
* the ``config_dir`` directory with the ``extension`` suffix,
* the ``config_dir`` directory without a suffix,
Example::
>>> from sopel import run_script
>>> os.listdir()
['local.cfg', 'extra.ini']
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
'local.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
'/home/username/.sopel/local'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
'/home/username/.sopel/config.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
'/home/username/.sopel/extra.ini'
"""
if os.path.isfile(name):
return name
name_ext = name + extension
for config in enumerate_configs(config_dir, extension):
if name_ext == config:
return os.path.join(config_dir, name_ext)
return os.path.join(config_dir, name)
|
def find_config(config_dir, name, extension='.cfg'):
"""Build the absolute path for the given configuration file ``name``
:param str config_dir: path to the configuration directory
:param str name: configuration file ``name``
:param str extension: configuration file's extension (default to ``.cfg``)
:return: the path of the configuration file, either in the current
directory or from the ``config_dir`` directory
This function tries different locations:
* the current directory,
* the ``config_dir`` directory with the ``extension`` suffix,
* the ``config_dir`` directory without a suffix,
Example::
>>> from sopel import run_script
>>> os.listdir()
['local.cfg', 'extra.ini']
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
'local.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
'/home/username/.sopel/local'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
'/home/username/.sopel/config.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
'/home/username/.sopel/extra.ini'
"""
if os.path.isfile(name):
return name
name_ext = name + extension
for config in enumerate_configs(config_dir, extension):
if name_ext == config:
return os.path.join(config_dir, name_ext)
return os.path.join(config_dir, name)
|
17,949 |
def apply_thresholds(input, thresholds, choices):
"""Makes a choice based on an input and thresholds.
From list of ``choices``, it selects one of them based on a list of
    inputs, depending on the position of each ``input`` within a list of
``thresholds``. It does so for each ``input`` provided.
Args:
input: A list of inputs to make a choice.
thresholds: A list of thresholds to choose.
choices: A list of the possible choices.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
A list of the choices made.
Raises:
:exc:`AssertionError`: When the number of ``thresholds`` (t) and the
number of choices (c) are not either t == c or t == c - 1.
Examples:
>>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20])
array([15])
>>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20])
array([20])
>>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20])
array([0])
"""
condlist = [input <= threshold for threshold in thresholds]
if len(condlist) == len(choices) - 1:
# If a choice is provided for input > highest threshold, last condition must be true to return it.
condlist += [True]
assert len(condlist) == len(choices), \
"apply_thresholds must be called with the same number of thresholds than choices, or one more choice"
return numpy.select(condlist, choices)
|
def apply_thresholds(input, thresholds, choices):
"""Makes a choice based on an input and thresholds.
From list of ``choices``, it selects one of them based on a list of
    inputs, depending on the position of each ``input`` within a list of
``thresholds``. It does so for each ``input`` provided.
Args:
input: A list of inputs to make a choice from.
thresholds: A list of thresholds to choose.
choices: A list of the possible choices.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
A list of the choices made.
Raises:
:exc:`AssertionError`: When the number of ``thresholds`` (t) and the
number of choices (c) are not either t == c or t == c - 1.
Examples:
>>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20])
array([15])
>>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20])
array([20])
>>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20])
array([0])
"""
condlist = [input <= threshold for threshold in thresholds]
if len(condlist) == len(choices) - 1:
# If a choice is provided for input > highest threshold, last condition must be true to return it.
condlist += [True]
assert len(condlist) == len(choices), \
"apply_thresholds must be called with the same number of thresholds than choices, or one more choice"
return numpy.select(condlist, choices)
|
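To make the boundary behaviour explicit (a value equal to a threshold falls into the lower bucket because the comparison is `<=`, and the appended `True` is the catch-all for values above the last threshold), here is the condition list that `numpy.select` receives for the `input = [5]` case from the docstring, shown as a standalone sketch:

import numpy

input_ = numpy.array([5])
thresholds = [5, 7]
choices = [10, 15, 20]

condlist = [input_ <= threshold for threshold in thresholds] + [True]
print(condlist)                         # [array([ True]), array([ True]), True]
print(numpy.select(condlist, choices))  # [10] -- the first matching condition wins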
35,531 |
def extract_casync_image(target_slot_number: int, partition: dict, cloudlog):
path = get_partition_path(target_slot_number, partition)
seed_path = path[:-1] + ('b' if path[-1] == 'a' else 'a')
target = casync.parse_caibx(partition['casync_caibx'])
sources: List[Tuple[str, casync.ChunkReader, casync.ChunkDict]] = []
# First source is the current partition.
try:
raw_hash = get_raw_hash(seed_path, partition['size'])
caibx_url = f"{CAIBX_URL}{partition['name']}-{raw_hash}.caibx"
try:
cloudlog.info(f"casync fetching {caibx_url}")
sources += [('seed', casync.FileChunkReader(seed_path), casync.build_chunk_dict(casync.parse_caibx(caibx_url)))]
except requests.RequestException:
cloudlog.error(f"casync failed to load {caibx_url}")
except Exception:
cloudlog.exception("Failed to hash seed partition")
# Second source is the target partition, this allows for resuming
sources += [('target', casync.FileChunkReader(path), casync.build_chunk_dict(target))]
# Finally we add the remote source to download any missing chunks
sources += [('remote', casync.RemoteChunkReader(partition['casync_store']), casync.build_chunk_dict(target))]
last_p = 0
def progress(cur):
nonlocal last_p
p = int(cur / partition['size'] * 100)
if p != last_p:
last_p = p
print(f"Installing {partition['name']}: {p}", flush=True)
stats = casync.extract(target, sources, path, progress)
cloudlog.error(f'casync done {json.dumps(stats)}')
os.sync()
if not verify_partition(target_slot_number, partition, force_full_check=True):
raise Exception(f"Raw hash mismatch '{partition['hash_raw'].lower()}'")
|
def extract_casync_image(target_slot_number: int, partition: dict, cloudlog):
path = get_partition_path(target_slot_number, partition)
seed_path = path[:-1] + ('b' if path[-1] == 'a' else 'a')
target = casync.parse_caibx(partition['casync_caibx'])
sources: List[Tuple[str, casync.ChunkReader, casync.ChunkDict]] = []
# First source is the current partition.
try:
raw_hash = get_raw_hash(seed_path, partition['size'])
caibx_url = f"{CAIBX_URL}{partition['name']}-{raw_hash}.caibx"
try:
cloudlog.info(f"casync fetching {caibx_url}")
sources += [('seed', casync.FileChunkReader(seed_path), casync.build_chunk_dict(casync.parse_caibx(caibx_url)))]
except requests.RequestException:
cloudlog.error(f"casync failed to load {caibx_url}")
except Exception:
cloudlog.exception("casync failed to hash seed partition")
# Second source is the target partition, this allows for resuming
sources += [('target', casync.FileChunkReader(path), casync.build_chunk_dict(target))]
# Finally we add the remote source to download any missing chunks
sources += [('remote', casync.RemoteChunkReader(partition['casync_store']), casync.build_chunk_dict(target))]
last_p = 0
def progress(cur):
nonlocal last_p
p = int(cur / partition['size'] * 100)
if p != last_p:
last_p = p
print(f"Installing {partition['name']}: {p}", flush=True)
stats = casync.extract(target, sources, path, progress)
cloudlog.error(f'casync done {json.dumps(stats)}')
os.sync()
if not verify_partition(target_slot_number, partition, force_full_check=True):
raise Exception(f"Raw hash mismatch '{partition['hash_raw'].lower()}'")
|
12,274 |
def read_qasm(qasm_input, mode="qiskit", version="2.0", strmode=False):
'''
Read OpenQASM intermediate representation
(https://github.com/Qiskit/openqasm) and return
a QubitCircuit and state inputs as specified in the
QASM file.
Parameters
----------
qasm_input : str
File location or String Input for QASM file to be imported. In case of
string input, the parameter strmode must be True.
mode : str
QASM mode to be read in. When mode is "qiskit",
the "qelib1.inc" include is automatically included,
without checking externally. Otherwise, each include is
processed.
version : str
QASM version of the QASM file. Only version 2.0 is currently supported.
strmode : bool
if specified as True, indicates that qasm_input is in string format
rather than from file.
Returns
-------
qc : QubitCircuit
Returns QubitCircuit specified in the QASM file.
'''
if strmode:
qasm_lines = qasm_input.splitlines()
else:
f = open(qasm_input, "r")
qasm_lines = f.read().splitlines()
f.close()
# split input into lines and ignore comments
qasm_lines = [line.strip() for line in qasm_lines]
qasm_lines = list(filter(lambda x: x[:2] != "//" and x != "", qasm_lines))
if version != "2.0":
raise NotImplementedError("QASM: Only OpenQASM 2.0 \
is currently supported.")
if qasm_lines.pop(0) != "OPENQASM 2.0;":
raise SyntaxError("QASM: File does not contain QASM 2.0 header")
qasm_obj = QasmProcessor(qasm_lines, mode=mode, version=version)
qasm_obj.commands = _tokenize(qasm_obj.commands)
qasm_obj._process_includes()
qasm_obj._initialize_pass()
qc = QubitCircuit(qasm_obj.num_qubits, num_cbits=qasm_obj.num_cbits)
qasm_obj._final_pass(qc)
return qc
|
def read_qasm(qasm_input, mode="qiskit", version="2.0", strmode=False):
'''
Read OpenQASM intermediate representation
(https://github.com/Qiskit/openqasm) and return
a :class:`.QubitCircuit` and state inputs as specified in the
QASM file.
Parameters
----------
qasm_input : str
File location or String Input for QASM file to be imported. In case of
string input, the parameter strmode must be True.
mode : str
QASM mode to be read in. When mode is "qiskit",
the "qelib1.inc" include is automatically included,
without checking externally. Otherwise, each include is
processed.
version : str
QASM version of the QASM file. Only version 2.0 is currently supported.
strmode : bool
if specified as True, indicates that qasm_input is in string format
rather than from file.
Returns
-------
qc : QubitCircuit
Returns QubitCircuit specified in the QASM file.
'''
if strmode:
qasm_lines = qasm_input.splitlines()
else:
f = open(qasm_input, "r")
qasm_lines = f.read().splitlines()
f.close()
# split input into lines and ignore comments
qasm_lines = [line.strip() for line in qasm_lines]
qasm_lines = list(filter(lambda x: x[:2] != "//" and x != "", qasm_lines))
if version != "2.0":
raise NotImplementedError("QASM: Only OpenQASM 2.0 \
is currently supported.")
if qasm_lines.pop(0) != "OPENQASM 2.0;":
raise SyntaxError("QASM: File does not contain QASM 2.0 header")
qasm_obj = QasmProcessor(qasm_lines, mode=mode, version=version)
qasm_obj.commands = _tokenize(qasm_obj.commands)
qasm_obj._process_includes()
qasm_obj._initialize_pass()
qc = QubitCircuit(qasm_obj.num_qubits, num_cbits=qasm_obj.num_cbits)
qasm_obj._final_pass(qc)
return qc
|
32,246 |
def apply_dns_signature_policy_command(args) -> CommandResults:
anti_spy_ware_name = args.get('Anti_spyware_profile_name')
edl = args.get('DNS_signature_source')
action = args.get('Action')
packet_capture = args.get('Packet_capture', 'disable')
params = {
'action': 'set',
'type': 'config',
'xpath': f"/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{DEVICE_GROUP}']"
f"/profiles/spyware/entry[@name='{anti_spy_ware_name}']",
'key': API_KEY,
'element': '<botnet-domains>'
f'<lists>'
f'<entry name="{edl}"><packet-capture>{packet_capture}</packet-capture>'
f'<action><{action}/></action></entry>'
f'</lists>'
f'</botnet-domains>'
}
result = http_request(
URL,
'POST',
params=params,
)
res_status = result.get('response', {}).get('@status')
return CommandResults(outputs=result,
outputs_prefix='Panorama.ApplyDNS',
readable_output=f'**{res_status}**',
)
|
def apply_dns_signature_policy_command(args) -> CommandResults:
anti_spy_ware_name = args.get('Anti_spyware_profile_name')
edl = args.get('DNS_signature_source')
action = args.get('Action')
packet_capture = args.get('Packet_capture', 'disable')
params = {
'action': 'set',
'type': 'config',
'xpath': f"{XPATH_OBJECTS}/profiles/spyware/entry[@name='{anti_spy_ware_name}']",
'key': API_KEY,
'element': '<botnet-domains>'
f'<lists>'
f'<entry name="{edl}"><packet-capture>{packet_capture}</packet-capture>'
f'<action><{action}/></action></entry>'
f'</lists>'
f'</botnet-domains>'
}
result = http_request(
URL,
'POST',
params=params,
)
res_status = result.get('response', {}).get('@status')
return CommandResults(outputs=result,
outputs_prefix='Panorama.ApplyDNS',
readable_output=f'**{res_status}**',
)
|
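To make the request payload in the pair above easier to inspect, here is a small self-contained sketch that reproduces only the element-building step; it sends nothing to PAN-OS, and the variable values are made-up examples rather than values from the row:
# Standalone sketch of the <botnet-domains> element built above; values are examples.
edl = "my-edl-source"          # DNS_signature_source
action = "sinkhole"            # Action
packet_capture = "disable"     # Packet_capture
element = ('<botnet-domains>'
           '<lists>'
           f'<entry name="{edl}"><packet-capture>{packet_capture}</packet-capture>'
           f'<action><{action}/></action></entry>'
           '</lists>'
           '</botnet-domains>')
print(element)
# <botnet-domains><lists><entry name="my-edl-source">...</entry></lists></botnet-domains>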
35,321 |
def _validate_add_sw(add_sw, exec_path, force_intel=False):
"""Validate additional swtiches.
Parameters
----------
add_sw : str
        Additional switches.
exec_path : str
Path to the MAPDL executable.
force_intel : bool, optional
Force the usage of intelmpi.
Returns
-------
str
Validated additional switches.
"""
# Converting additional_switches to lower case to avoid mismatches.
add_sw = add_sw.lower()
# known issues with distributed memory parallel (DMP)
if "smp" not in add_sw: # pragma: no cover
# Ubuntu ANSYS fails to launch without I_MPI_SHM_LMT
if _is_ubuntu():
os.environ['I_MPI_SHM_LMT'] = 'shm'
if os.name == 'nt' and not force_intel:
# Workaround to fix a problem when launching ansys in 'dmp' mode in the
# recent windows version and using VPN.
#
# There doesn't appear to be an easy way to check if we
# are running VPN in Windows in python, it seems we will
# need to know a local address where to ping but that will
# change for each client/person using the VPN.
#
            # Adding '-mpi msmpi' to the launch parameter fixes it.
if 'intelmpi' in add_sw:
# Remove intel flag.
regex = "(-mpi)( *?)(intelmpi)"
add_sw = re.sub(regex, '', add_sw)
warnings.warn(INTEL_MSG)
            if _version_from_path(exec_path) >= 210:
add_sw += ' -mpi msmpi'
return add_sw
|
def _validate_add_sw(add_sw, exec_path, force_intel=False):
"""Validate additional switches.
Parameters
----------
add_sw : str
        Additional switches.
exec_path : str
Path to the MAPDL executable.
force_intel : bool, optional
Force the usage of intelmpi.
Returns
-------
str
Validated additional switches.
"""
# Converting additional_switches to lower case to avoid mismatches.
add_sw = add_sw.lower()
# known issues with distributed memory parallel (DMP)
if "smp" not in add_sw: # pragma: no cover
# Ubuntu ANSYS fails to launch without I_MPI_SHM_LMT
if _is_ubuntu():
os.environ['I_MPI_SHM_LMT'] = 'shm'
if os.name == 'nt' and not force_intel:
# Workaround to fix a problem when launching ansys in 'dmp' mode in the
# recent windows version and using VPN.
#
# There doesn't appear to be an easy way to check if we
# are running VPN in Windows in python, it seems we will
# need to know a local address where to ping but that will
# change for each client/person using the VPN.
#
            # Adding '-mpi msmpi' to the launch parameter fixes it.
if 'intelmpi' in add_sw:
# Remove intel flag.
regex = "(-mpi)( *?)(intelmpi)"
add_sw = re.sub(regex, '', add_sw)
warnings.warn(INTEL_MSG)
            if _version_from_path(exec_path) >= 210:
add_sw += ' -mpi msmpi'
return add_sw
|
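The only non-obvious step in _validate_add_sw above is the regex that strips an existing '-mpi intelmpi' switch before '-mpi msmpi' is appended; a minimal sketch of that substitution on a made-up switch string:
import re
# Sample additional switches, lower-cased as in _validate_add_sw.
add_sw = "-dis -mpi intelmpi".lower()
# Same pattern used above: '-mpi', optional spaces, 'intelmpi'.
add_sw = re.sub(r"(-mpi)( *?)(intelmpi)", "", add_sw)
add_sw += " -mpi msmpi"
print(add_sw)  # "-dis  -mpi msmpi" (note the leftover double space)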
46,967 |
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub)
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.block_size <= 0:
block_size = tokenizer.max_len
else:
block_size = min(data_args.block_size, tokenizer.max_len)
# Main function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
lm_datasets = tokenized_datasets.map(group_texts, batched=True, load_from_cache_file=not data_args.overwrite_cache)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=lm_datasets["train"] if training_args.do_train else None,
eval_dataset=lm_datasets["validation"] if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model() # Saves the tokenizer too for easy upload
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
results["perplexity"] = perplexity
output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in results.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
return results
|
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub)
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.block_size <= 0:
block_size = tokenizer.max_len
else:
block_size = min(data_args.block_size, tokenizer.max_len)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
lm_datasets = tokenized_datasets.map(group_texts, batched=True, load_from_cache_file=not data_args.overwrite_cache)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=lm_datasets["train"] if training_args.do_train else None,
eval_dataset=lm_datasets["validation"] if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model() # Saves the tokenizer too for easy upload
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
results["perplexity"] = perplexity
output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in results.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
return results
|
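The group_texts helper in the run_clm pair above concatenates the tokenized examples and re-cuts them into fixed-size blocks, dropping the remainder. A dependency-free toy sketch with block_size=4 and made-up token ids shows the effect:
block_size = 4  # stand-in for min(data_args.block_size, tokenizer.max_len)
def group_texts(examples):
    # Concatenate all lists per key, then split into block_size chunks,
    # dropping the tail that does not fill a full block.
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    total_length = (total_length // block_size) * block_size
    result = {k: [t[i: i + block_size] for i in range(0, total_length, block_size)]
              for k, t in concatenated.items()}
    result["labels"] = result["input_ids"].copy()
    return result
batch = {"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8, 9]]}
print(group_texts(batch))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]], 'labels': [[1, 2, 3, 4], [5, 6, 7, 8]]}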
31,059 |
def main():
try:
if demisto.command() == 'test-module':
            # Tests connectivity and credentials on login
# generateStartEndDates(1)
return "ok"
elif demisto.command() == 'ironportQuarantineReleaseEmail':
mesId = demisto.args().get('mid')
ironportQuarantineReleaseEmail(mesId)
elif demisto.command() == 'ironportSpamReleaseEmail':
mesId = demisto.args().get('mid')
ironportSpamReleaseEmail(mesId)
elif demisto.command() == 'ironPortSearchQuarantines':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearchSpam':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearch':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit)
except Exception as e:
LOG.print_log(e)
#
|
def main():
try:
if demisto.command() == 'test-module':
            # Tests connectivity and credentials on login
# generateStartEndDates(1)
return "ok"
elif demisto.command() == 'ironportQuarantineReleaseEmail':
mesId = demisto.args().get('mid')
ironportQuarantineReleaseEmail(mesId)
elif demisto.command() == 'ironportSpamReleaseEmail':
mesId = demisto.args().get('mid')
ironportSpamReleaseEmail(mesId)
elif demisto.command() == 'ironPortSearchQuarantines':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'iron-port-search-spam':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearch':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit)
except Exception as e:
LOG.print_log(e)
#
|
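The three search branches in the IronPort dispatcher above pull the same five arguments out of demisto.args(); a hypothetical helper (not part of the integration) that factors out that extraction might look like this:
# Hypothetical refactor sketch only; demisto.args() is the XSOAR argument dict
# used above, and the search functions are the ones defined in the integration.
def _search_args(args):
    return (args.get('periodInDays'),
            args.get('senderPattern'),
            args.get('recipientPattern'),
            args.get('subjectPattern'),
            args.get('limit'))
# Usage inside main(), e.g. for the quarantine search command:
# ironPortSearchQuarantines(*_search_args(demisto.args()))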
14,580 |
def pi_gaze_items(root_dir):
def find_raw_path(timestamps_path):
raw_name = timestamps_path.name.replace("_timestamps", "")
raw_path = timestamps_path.with_name(raw_name).with_suffix(".raw")
assert raw_path.exists(), f"The file does not exist at path: {raw_path}"
return raw_path
def find_worn_path(timestamps_path):
worn_name = timestamps_path.name
worn_name = worn_name.replace("gaze", "worn")
worn_name = worn_name.replace("_timestamps", "")
worn_path = timestamps_path.with_name(worn_name).with_suffix(".raw")
if worn_path.exists():
return worn_path
else:
return None
def load_timestamps_data(path):
timestamps = np.load(str(path))
return timestamps
def load_raw_data(path):
raw_data = np.fromfile(str(path), "<f4")
raw_data_dtype = raw_data.dtype
raw_data.shape = (-1, 2)
return np.asarray(raw_data, dtype=raw_data_dtype)
def load_worn_data(path):
if not (path and path.exists()):
return None
confidences = np.fromfile(str(path), "<u1") / 255.0
return np.clip(confidences, 0.0, 1.0)
# This pattern will match any filename that:
# - starts with "gaze ps"
# - is followed by one or more digits
# - ends with "_timestamps.npy"
gaze_timestamp_paths = match_contents_by_name_pattern(
pl.Path(root_dir), "^gaze ps[0-9]+_timestamps.npy$"
)
for timestamps_path in gaze_timestamp_paths:
raw_path = find_raw_path(timestamps_path)
timestamps = load_timestamps_data(timestamps_path)
raw_data = load_raw_data(raw_path)
if len(raw_data) != len(timestamps):
logger.warning(
f"There is a mismatch between the number of raw data ({len(raw_data)}) "
f"and the number of timestamps ({len(timestamps)})!"
)
size = min(len(raw_data), len(timestamps))
raw_data = raw_data[:size]
timestamps = timestamps[:size]
conf_data = load_worn_data(find_worn_path(timestamps_path))
if conf_data is not None and len(conf_data) != len(timestamps):
logger.warning(
f"There is a mismatch between the number of confidence data ({len(conf_data)}) "
f"and the number of timestamps ({len(timestamps)})! Not using confidence data."
)
conf_data = None
if conf_data is None:
conf_data = (1.0 for _ in range(len(timestamps)))
yield from zip(raw_data, timestamps, conf_data)
|
def pi_gaze_items(root_dir):
def find_raw_path(timestamps_path):
raw_name = timestamps_path.name.replace("_timestamps", "")
raw_path = timestamps_path.with_name(raw_name).with_suffix(".raw")
assert raw_path.exists(), f"The file does not exist at path: {raw_path}"
return raw_path
def find_worn_path(timestamps_path):
worn_name = timestamps_path.name
worn_name = worn_name.replace("gaze", "worn")
worn_name = worn_name.replace("_timestamps", "")
worn_path = timestamps_path.with_name(worn_name).with_suffix(".raw")
if worn_path.exists():
return worn_path
else:
return None
def load_timestamps_data(path):
timestamps = np.load(str(path))
return timestamps
def load_raw_data(path):
raw_data = np.fromfile(str(path), "<f4")
raw_data_dtype = raw_data.dtype
raw_data.shape = (-1, 2)
return np.asarray(raw_data, dtype=raw_data_dtype)
def load_worn_data(path):
if not (path and path.exists()):
return None
confidences = np.fromfile(str(path), "<u1") / 255.0
return np.clip(confidences, 0.0, 1.0)
# This pattern will match any filename that:
# - starts with "gaze ps"
# - is followed by one or more digits
# - ends with "_timestamps.npy"
gaze_timestamp_paths = match_contents_by_name_pattern(
pl.Path(root_dir), r"^gaze ps[0-9]+_timestamps.npy$"
)
for timestamps_path in gaze_timestamp_paths:
raw_path = find_raw_path(timestamps_path)
timestamps = load_timestamps_data(timestamps_path)
raw_data = load_raw_data(raw_path)
if len(raw_data) != len(timestamps):
logger.warning(
f"There is a mismatch between the number of raw data ({len(raw_data)}) "
f"and the number of timestamps ({len(timestamps)})!"
)
size = min(len(raw_data), len(timestamps))
raw_data = raw_data[:size]
timestamps = timestamps[:size]
conf_data = load_worn_data(find_worn_path(timestamps_path))
if conf_data is not None and len(conf_data) != len(timestamps):
logger.warning(
f"There is a mismatch between the number of confidence data ({len(conf_data)}) "
f"and the number of timestamps ({len(timestamps)})! Not using confidence data."
)
conf_data = None
if conf_data is None:
conf_data = (1.0 for _ in range(len(timestamps)))
yield from zip(raw_data, timestamps, conf_data)
|
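In the pi_gaze_items pair above, the .raw gaze file is a flat stream of little-endian float32 values reshaped into (x, y) pairs, and the worn file holds one unsigned byte per sample scaled to a 0-1 confidence. A small numpy sketch of both conversions on in-memory buffers (all values are made up):
import numpy as np
# Fake "gaze ps1.raw" contents: four float32 values -> two (x, y) samples.
raw_bytes = np.array([0.1, 0.2, 0.3, 0.4], dtype="<f4").tobytes()
raw_data = np.frombuffer(raw_bytes, dtype="<f4").reshape(-1, 2)
print(raw_data)          # [[0.1 0.2], [0.3 0.4]]
# Fake "worn ps1.raw" contents: one byte per sample, scaled like load_worn_data.
worn_bytes = bytes([0, 128, 255])
confidences = np.clip(np.frombuffer(worn_bytes, dtype="<u1") / 255.0, 0.0, 1.0)
print(confidences)       # [0. 0.50196078 1.]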
38,694 |
def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--max-fail', metavar='NUM', action='store', default=0,
help='Set the maximum number of failures before exiting'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
help='Graylog server address'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the
        # system and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a report
if options.restore_session:
filename = options.restore_session
else:
filename = runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)
report = runreport.load_report(filename)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
loader = RegressionCheckLoader(
load_path=check_search_path,
recurse=check_search_recursive,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts'
)
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
except OSError as e:
raise errors.ReframeError from e
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if rec and rec['result'] == 'failure':
return True
else:
return False
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
type(tc.check).disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if not options.run:
printer.error(f"No action specified. Please specify `-l'/`-L' for "
f"listing or `-r' for running. "
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
f'--max-retries is not a valid integer: {max_retries}'
) from None
try:
max_fail = int(options.max_fail)
except ValueError:
raise errors.ConfigError(
f'--max-fail is not a valid integer: {max_fail}'
) from None
runner = Runner(exec_policy, printer, max_retries, max_fail)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failures():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
|
def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--maxfail', metavar='NUM', action='store', default=0,
help='Set the maximum number of failures before exiting'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
        help='Ignore the ReqNodeNotAvail Slurm state'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the
        # system and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a report
if options.restore_session:
filename = options.restore_session
else:
filename = runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)
report = runreport.load_report(filename)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
loader = RegressionCheckLoader(
load_path=check_search_path,
recurse=check_search_recursive,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts'
)
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
except OSError as e:
raise errors.ReframeError from e
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if rec and rec['result'] == 'failure':
return True
else:
return False
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
type(tc.check).disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if not options.run:
printer.error(f"No action specified. Please specify `-l'/`-L' for "
f"listing or `-r' for running. "
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
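        # --flex-alloc-nodes accepts either a positive node count or a string
        # value (e.g. a node state or 'all'), which is passed through as is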
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
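        # Normalize job options: values already starting with '-' or '#' pass
        # through verbatim, bare single-letter options get a '-' prefix and
        # longer ones get '--'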
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
                f'--max-retries is not a valid integer: {options.max_retries}'
) from None
try:
            max_fail = int(options.maxfail)
except ValueError:
raise errors.ConfigError(
                f'--maxfail is not a valid integer: {options.maxfail}'
) from None
runner = Runner(exec_policy, printer, max_retries, max_fail)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failures():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
|
704 |
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = list(form.xpath(
'descendant::input[re:test(@type, "^(submit|image)$", "i")]'
'|descendant::button[not(@type) or re:test(@type, "^submit$", "i")]',
namespaces={"re": "http://exslt.org/regular-expressions"}
))
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = './/*' + ''.join(f'[@{key}="{clickdata[key]}"]' for key in clickdata)
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError(f"Multiple elements found ({el!r}) matching the "
f"criteria in clickdata: {clickdata!r}")
else:
raise ValueError(f'No clickable element matching clickdata: {clickdata!r}')
|
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = list(form.xpath(
'descendant::input[re:test(@type, "^(submit|image)$", "i")]'
'|descendant::button[not(@type) or re:test(@type, "^submit$", "i")]',
namespaces={"re": "http://exslt.org/regular-expressions"}
))
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = './/*' + ''.join(f'[@{k}="{v}"]' for k, v in clickdata.items())
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError(f"Multiple elements found ({el!r}) matching the "
f"criteria in clickdata: {clickdata!r}")
else:
raise ValueError(f'No clickable element matching clickdata: {clickdata!r}')
|
6,094 |
def selectUniqueRandomSource(ftsFiles, allowedSources=None):
"""
For a list of FTS3files object, select a random source, and group the files by source.
We also return the FTS3Files for which we had problems getting replicas
:param allowedSources: list of allowed sources
:param ftsFiles: list of FTS3File object
:return: S_OK({ sourceSE: [ FTS3Files] }, {FTS3File: errors})
"""
_log = gLogger.getSubLogger("selectUniqueRandomSource")
allowedSourcesSet = set(allowedSources) if allowedSources else set()
# destGroup will contain for each target SE a dict { source : [list of FTS3Files] }
groupBySource = {}
# For all files, check which possible sources they have
res = _checkSourceReplicas(ftsFiles)
if not res['OK']:
return res
filteredReplicas = res['Value']
# LFNs for which we failed to get replicas
failedFiles = {}
for ftsFile in ftsFiles:
# If we failed to get the replicas, add the FTS3File to
    # the dictionary
if ftsFile.lfn in filteredReplicas['Failed']:
errMsg = filteredReplicas['Failed'][ftsFile.lfn]
failedFiles[ftsFile] = errMsg
_log.debug("Failed to get active replicas", "%s,%s" %
(ftsFile.lfn, errMsg))
continue
replicaDict = filteredReplicas['Successful'][ftsFile.lfn]
# Only consider the allowed sources
# If we have a restriction, apply it, otherwise take all the replicas
allowedReplicaSource = (set(replicaDict) & allowedSourcesSet) if allowedSourcesSet else replicaDict
# pick a random source
randSource = random.choice(list(allowedReplicaSource)) # one has to convert to list
groupBySource.setdefault(randSource, []).append(ftsFile)
return S_OK((groupBySource, failedFiles))
|
def selectUniqueRandomSource(ftsFiles, allowedSources=None):
"""
For a list of FTS3files object, select a random source, and group the files by source.
We also return the FTS3Files for which we had problems getting replicas
:param allowedSources: list of allowed sources
:param ftsFiles: list of FTS3File object
:return: S_OK(({ sourceSE: [ FTS3Files] }, {FTS3File: errors}))
"""
_log = gLogger.getSubLogger("selectUniqueRandomSource")
allowedSourcesSet = set(allowedSources) if allowedSources else set()
# destGroup will contain for each target SE a dict { source : [list of FTS3Files] }
groupBySource = {}
# For all files, check which possible sources they have
res = _checkSourceReplicas(ftsFiles)
if not res['OK']:
return res
filteredReplicas = res['Value']
# LFNs for which we failed to get replicas
failedFiles = {}
for ftsFile in ftsFiles:
# If we failed to get the replicas, add the FTS3File to
    # the dictionary
if ftsFile.lfn in filteredReplicas['Failed']:
errMsg = filteredReplicas['Failed'][ftsFile.lfn]
failedFiles[ftsFile] = errMsg
_log.debug("Failed to get active replicas", "%s,%s" %
(ftsFile.lfn, errMsg))
continue
replicaDict = filteredReplicas['Successful'][ftsFile.lfn]
# Only consider the allowed sources
# If we have a restriction, apply it, otherwise take all the replicas
allowedReplicaSource = (set(replicaDict) & allowedSourcesSet) if allowedSourcesSet else replicaDict
# pick a random source
randSource = random.choice(list(allowedReplicaSource)) # one has to convert to list
groupBySource.setdefault(randSource, []).append(ftsFile)
return S_OK((groupBySource, failedFiles))
|
706 |
def install_reactor(reactor_path, event_loop_path):
"""Installs the :mod:`~twisted.internet.reactor` with the specified
    import path. Also installs asyncio event loop as specified in the import
path if asyncio reactor is enabled"""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
with suppress(error.ReactorAlreadyInstalledError):
if event_loop_path is not None:
x = __import__(event_loop_path)
if x is not None:
loop = x.new_event_loop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
asyncioreactor.install(loop)
else:
*module, _ = reactor_path.split(".")
installer_path = module + ["install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
|
def install_reactor(reactor_path, event_loop_path):
"""Installs the :mod:`~twisted.internet.reactor` with the specified
    import path. Also installs asyncio event loop as specified in the import
path if the asyncio reactor is enabled"""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
with suppress(error.ReactorAlreadyInstalledError):
if event_loop_path is not None:
x = __import__(event_loop_path)
if x is not None:
loop = x.new_event_loop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
asyncioreactor.install(loop)
else:
*module, _ = reactor_path.split(".")
installer_path = module + ["install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
|
6,756 |
def manage_recurring_payment_profile_status(profile_id, action, args, url):
args.update({
"METHOD": "ManageRecurringPaymentsProfileStatus",
"PROFILEID": profile_id,
"ACTION": action
})
response = make_post_request(url, data=args)
# error code 11556 indicates profile is not in active state(or already cancelled)
# thus could not cancel the subscription.
# thus raise exception only if error code not quals to 11556
if response.get("ACK")[0] != "Success" and response.get("L_ERRORCODE0", [])[0] != '11556':
frappe.throw(_("Failed while amending subscription"))
|
def manage_recurring_payment_profile_status(profile_id, action, args, url):
args.update({
"METHOD": "ManageRecurringPaymentsProfileStatus",
"PROFILEID": profile_id,
"ACTION": action
})
response = make_post_request(url, data=args)
# error code 11556 indicates profile is not in active state(or already cancelled)
# thus could not cancel the subscription.
# thus raise an exception only if the error code is not equal to 11556
if response.get("ACK")[0] != "Success" and response.get("L_ERRORCODE0", [])[0] != '11556':
frappe.throw(_("Failed while amending subscription"))
|
12,250 |
def get_parser() -> ArgumentParser:
parser = ArgumentParser(
usage="%(prog)s [options] [connection string]",
description=(
"htop like application for PostgreSQL server activity monitoring."
),
epilog=(
"The connection string can be in the form of a list of "
"Key/Value parameters or an URI as described in the PostgreSQL documentation. "
"The parsing is delegated to the libpq: different versions of the client library "
"may support different formats or parameters (for example, connection URIs are "
"only supported from libpq 9.2)."
),
add_help=False,
)
group = parser.add_argument_group(
"Options",
)
# --blocksize
group.add_argument(
"--blocksize",
dest="blocksize",
help="Filesystem blocksize (default: %(default)s).",
metavar="BLOCKSIZE",
type=int,
default=4096,
)
# --rds
group.add_argument(
"--rds",
dest="rds",
action="store_true",
help="Enable support for AWS RDS (implies --no-tempfiles).",
default=False,
)
# --output
group.add_argument(
"--output",
dest="output",
help="Store running queries as CSV.",
metavar="FILEPATH",
default=None,
)
# --no-db-size
group.add_argument(
"--no-db-size",
dest="nodbsize",
action="store_true",
help="Skip total size of DB.",
default=False,
)
# --no-tempfile
group.add_argument(
"--no-tempfile",
dest="notempfile",
action="store_true",
help="Skip tempfile count and size.",
default=False,
)
# --no-walreceiver
group.add_argument(
"--no-walreceiver",
dest="nowalreceiver",
action="store_true",
help="Skip walreceiver checks.",
default=False,
)
# --wrap-query
group.add_argument(
"-w",
"--wrap-query",
dest="wrap_query",
action="store_true",
help="Wrap query column instead of truncating.",
default=False,
)
# --duration-mode
group.add_argument(
"--duration-mode",
dest="durationmode",
help="Duration mode. Values: 1-QUERY(default), 2-TRANSACTION, 3-BACKEND.",
metavar="DURATION_MODE",
choices=["1", "2", "3"],
default="1",
)
# --min-duration
group.add_argument(
"--min-duration",
dest="minduration",
help="Don't display queries with smaller than specified duration (in seconds).",
metavar="SECONDS",
type=float,
default=0,
)
# --filter
group.add_argument(
"--filter",
dest="filters",
help=(
"Filter activities with a (case insensitive) regular expression applied on selected fields. "
"Known fields are: dbname."
),
action="append",
metavar="FIELD:REGEX",
default=[],
)
# --version
group.add_argument(
"--version",
help="show program's version number and exit.",
action="version",
version=f"%(prog)s {__version__}",
)
# --help
group.add_argument(
"--help",
dest="help",
action="store_true",
help="Show this help message and exit.",
default=False,
)
group = parser.add_argument_group(
"Connection Options",
)
# Connection string
group.add_argument(
"connection_string",
help=(
"A valid connection string to the database, e.g.: "
"'host=HOSTNAME port=PORT user=USER dbname=DBNAME'."
),
nargs="?",
metavar="connection string",
)
# -h / --host
group.add_argument(
"-h",
"--host",
dest="host",
help="Database server host or socket directory.",
metavar="HOSTNAME",
)
# -p / --port
group.add_argument(
"-p",
"--port",
dest="port",
help="Database server port.",
metavar="PORT",
)
# -U / --username
group.add_argument(
"-U",
"--username",
dest="username",
help="Database user name.",
metavar="USERNAME",
)
# -d / --dbname
group.add_argument(
"-d",
"--dbname",
dest="dbname",
help="Database name to connect to.",
metavar="DBNAME",
)
group = parser.add_argument_group(
"Process table display options",
"These options may be used hide some columns from the processes table.",
)
# --no-pid
group.add_argument(
"--no-pid",
dest="nopid",
action="store_true",
help="Disable PID.",
default=False,
)
# --no-database
group.add_argument(
"--no-database",
dest="nodb",
action="store_true",
help="Disable DATABASE.",
default=False,
)
# --no-user
group.add_argument(
"--no-user",
dest="nouser",
action="store_true",
help="Disable USER.",
default=False,
)
# --no-client
group.add_argument(
"--no-client",
dest="noclient",
action="store_true",
help="Disable CLIENT.",
default=False,
)
# --no-cpu
group.add_argument(
"--no-cpu",
dest="nocpu",
action="store_true",
help="Disable CPU%%.",
default=False,
)
# --no-mem
group.add_argument(
"--no-mem",
dest="nomem",
action="store_true",
help="Disable MEM%%.",
default=False,
)
# --no-read
group.add_argument(
"--no-read",
dest="noread",
action="store_true",
help="Disable READ/s.",
default=False,
)
# --no-write
group.add_argument(
"--no-write",
dest="nowrite",
action="store_true",
help="Disable WRITE/s.",
default=False,
)
# --no-time
group.add_argument(
"--no-time",
dest="notime",
action="store_true",
help="Disable TIME+.",
default=False,
)
# --no-wait
group.add_argument(
"--no-wait",
dest="nowait",
action="store_true",
help="Disable W.",
default=False,
)
# --no-app-name
group.add_argument(
"--no-app-name",
dest="noappname",
action="store_true",
help="Disable App.",
default=False,
)
group = parser.add_argument_group("Other display options")
# --hide-queries-in-logs
group.add_argument(
"--hide-queries-in-logs",
dest="hide_queries_in_logs",
action="store_true",
help="Disable log_min_duration_statements and log_min_duration_sample for pg_activity.",
default=False,
)
# --no-inst-info
group.add_argument(
"--no-inst-info",
dest="show_instance_info_in_header",
action="store_false",
help="Display instance information in header.",
default=True,
)
# --no-sys-info
group.add_argument(
"--no-sys-info",
dest="show_system_info_in_header",
action="store_false",
help="Display system information in header.",
default=True,
)
# --no-proc-info
group.add_argument(
"--no-proc-info",
dest="show_worker_info_in_header",
action="store_false",
help="Display workers process information in header.",
default=True,
)
# --refresh
group.add_argument(
"--refresh",
dest="refresh",
help="Refresh rate. Values: %(choices)s (default: %(default)d).",
metavar="REFRESH",
choices=[0.5, 1, 2, 3, 4, 5],
type=float,
default=2,
)
return parser
|
def get_parser() -> ArgumentParser:
parser = ArgumentParser(
usage="%(prog)s [options] [connection string]",
description=(
"htop like application for PostgreSQL server activity monitoring."
),
epilog=(
"The connection string can be in the form of a list of "
"Key/Value parameters or an URI as described in the PostgreSQL documentation. "
"The parsing is delegated to the libpq: different versions of the client library "
"may support different formats or parameters (for example, connection URIs are "
"only supported from libpq 9.2)."
),
add_help=False,
)
group = parser.add_argument_group(
"Options",
)
# --blocksize
group.add_argument(
"--blocksize",
dest="blocksize",
help="Filesystem blocksize (default: %(default)s).",
metavar="BLOCKSIZE",
type=int,
default=4096,
)
# --rds
group.add_argument(
"--rds",
dest="rds",
action="store_true",
help="Enable support for AWS RDS (implies --no-tempfile).",
default=False,
)
# --output
group.add_argument(
"--output",
dest="output",
help="Store running queries as CSV.",
metavar="FILEPATH",
default=None,
)
# --no-db-size
group.add_argument(
"--no-db-size",
dest="nodbsize",
action="store_true",
help="Skip total size of DB.",
default=False,
)
# --no-tempfile
group.add_argument(
"--no-tempfile",
dest="notempfile",
action="store_true",
help="Skip tempfile count and size.",
default=False,
)
# --no-walreceiver
group.add_argument(
"--no-walreceiver",
dest="nowalreceiver",
action="store_true",
help="Skip walreceiver checks.",
default=False,
)
# --wrap-query
group.add_argument(
"-w",
"--wrap-query",
dest="wrap_query",
action="store_true",
help="Wrap query column instead of truncating.",
default=False,
)
# --duration-mode
group.add_argument(
"--duration-mode",
dest="durationmode",
help="Duration mode. Values: 1-QUERY(default), 2-TRANSACTION, 3-BACKEND.",
metavar="DURATION_MODE",
choices=["1", "2", "3"],
default="1",
)
# --min-duration
group.add_argument(
"--min-duration",
dest="minduration",
help="Don't display queries with smaller than specified duration (in seconds).",
metavar="SECONDS",
type=float,
default=0,
)
# --filter
group.add_argument(
"--filter",
dest="filters",
help=(
"Filter activities with a (case insensitive) regular expression applied on selected fields. "
"Known fields are: dbname."
),
action="append",
metavar="FIELD:REGEX",
default=[],
)
# --version
group.add_argument(
"--version",
help="show program's version number and exit.",
action="version",
version=f"%(prog)s {__version__}",
)
# --help
group.add_argument(
"--help",
dest="help",
action="store_true",
help="Show this help message and exit.",
default=False,
)
group = parser.add_argument_group(
"Connection Options",
)
# Connection string
group.add_argument(
"connection_string",
help=(
"A valid connection string to the database, e.g.: "
"'host=HOSTNAME port=PORT user=USER dbname=DBNAME'."
),
nargs="?",
metavar="connection string",
)
# -h / --host
group.add_argument(
"-h",
"--host",
dest="host",
help="Database server host or socket directory.",
metavar="HOSTNAME",
)
# -p / --port
group.add_argument(
"-p",
"--port",
dest="port",
help="Database server port.",
metavar="PORT",
)
# -U / --username
group.add_argument(
"-U",
"--username",
dest="username",
help="Database user name.",
metavar="USERNAME",
)
# -d / --dbname
group.add_argument(
"-d",
"--dbname",
dest="dbname",
help="Database name to connect to.",
metavar="DBNAME",
)
group = parser.add_argument_group(
"Process table display options",
"These options may be used hide some columns from the processes table.",
)
# --no-pid
group.add_argument(
"--no-pid",
dest="nopid",
action="store_true",
help="Disable PID.",
default=False,
)
# --no-database
group.add_argument(
"--no-database",
dest="nodb",
action="store_true",
help="Disable DATABASE.",
default=False,
)
# --no-user
group.add_argument(
"--no-user",
dest="nouser",
action="store_true",
help="Disable USER.",
default=False,
)
# --no-client
group.add_argument(
"--no-client",
dest="noclient",
action="store_true",
help="Disable CLIENT.",
default=False,
)
# --no-cpu
group.add_argument(
"--no-cpu",
dest="nocpu",
action="store_true",
help="Disable CPU%%.",
default=False,
)
# --no-mem
group.add_argument(
"--no-mem",
dest="nomem",
action="store_true",
help="Disable MEM%%.",
default=False,
)
# --no-read
group.add_argument(
"--no-read",
dest="noread",
action="store_true",
help="Disable READ/s.",
default=False,
)
# --no-write
group.add_argument(
"--no-write",
dest="nowrite",
action="store_true",
help="Disable WRITE/s.",
default=False,
)
# --no-time
group.add_argument(
"--no-time",
dest="notime",
action="store_true",
help="Disable TIME+.",
default=False,
)
# --no-wait
group.add_argument(
"--no-wait",
dest="nowait",
action="store_true",
help="Disable W.",
default=False,
)
# --no-app-name
group.add_argument(
"--no-app-name",
dest="noappname",
action="store_true",
help="Disable App.",
default=False,
)
group = parser.add_argument_group("Other display options")
# --hide-queries-in-logs
group.add_argument(
"--hide-queries-in-logs",
dest="hide_queries_in_logs",
action="store_true",
help="Disable log_min_duration_statements and log_min_duration_sample for pg_activity.",
default=False,
)
# --no-inst-info
group.add_argument(
"--no-inst-info",
dest="show_instance_info_in_header",
action="store_false",
help="Display instance information in header.",
default=True,
)
# --no-sys-info
group.add_argument(
"--no-sys-info",
dest="show_system_info_in_header",
action="store_false",
help="Display system information in header.",
default=True,
)
# --no-proc-info
group.add_argument(
"--no-proc-info",
dest="show_worker_info_in_header",
action="store_false",
help="Display workers process information in header.",
default=True,
)
# --refresh
group.add_argument(
"--refresh",
dest="refresh",
help="Refresh rate. Values: %(choices)s (default: %(default)d).",
metavar="REFRESH",
choices=[0.5, 1, 2, 3, 4, 5],
type=float,
default=2,
)
return parser
|
5,957 |
def format_decimal(d, f, grouping=True):
f = f or '%.3f'
print(f)
return locale.format_string(f, d, grouping=grouping).rstrip('0').rstrip('.')
|
def format_decimal(d, f='%.3f', grouping=True):
return locale.format_string(f, d, grouping=grouping).rstrip('0').rstrip('.')
|
35,522 |
def get_fw_versions(logcan, sendcan, extra=None, timeout=0.1, debug=False, progress=False):
ecu_types = {}
# Extract ECU addresses to query from fingerprints
  # ECUs using a sub-address need to be queried one by one, the rest can be done in parallel
addrs = []
parallel_addrs = []
versions = get_attr_from_cars('FW_VERSIONS', combine_brands=False)
if extra is not None:
versions.update(extra)
for brand, brand_versions in versions.items():
for c in brand_versions.values():
for ecu_type, addr, sub_addr in c.keys():
a = (brand, addr, sub_addr)
if a not in ecu_types:
ecu_types[(addr, sub_addr)] = ecu_type
if sub_addr is None:
if a not in parallel_addrs:
parallel_addrs.append(a)
else:
if [a] not in addrs:
addrs.append([a])
addrs.insert(0, parallel_addrs)
fw_versions = {}
for i, addr in enumerate(tqdm(addrs, disable=not progress)):
for addr_chunk in chunks(addr):
for r in REQUESTS:
try:
addrs = [(b, a, s) for (b, a, s) in addr_chunk if b in (r.brand, 'any') and
(len(r.whitelist_ecus) == 0 or ecu_types[(a, s)] in r.whitelist_ecus)]
if addrs:
query = IsoTpParallelQuery(sendcan, logcan, r.bus, addrs, r.request, r.response, r.rx_offset, debug=debug)
t = 2 * timeout if i == 0 else timeout
fw_versions.update(query.get_data(t))
except Exception:
cloudlog.warning(f"FW query exception: {traceback.format_exc()}")
# Build capnp list to put into CarParams
car_fw = []
for addr, version in fw_versions.items():
f = car.CarParams.CarFw.new_message()
f.ecu = ecu_types[addr]
f.fwVersion = version
f.address = addr[0]
if addr[1] is not None:
f.subAddress = addr[1]
car_fw.append(f)
return car_fw
|
def get_fw_versions(logcan, sendcan, extra=None, timeout=0.1, debug=False, progress=False):
ecu_types = {}
# Extract ECU addresses to query from fingerprints
  # ECUs using a sub-address need to be queried one by one, the rest can be done in parallel
addrs = []
parallel_addrs = []
versions = get_attr_from_cars('FW_VERSIONS', combine_brands=False)
if extra is not None:
versions.update(extra)
for brand, brand_versions in versions.items():
for c in brand_versions.values():
for ecu_type, addr, sub_addr in c.keys():
a = (brand, addr, sub_addr)
if a not in ecu_types:
ecu_types[(addr, sub_addr)] = ecu_type
if sub_addr is None:
if a not in parallel_addrs:
parallel_addrs.append(a)
else:
if [a] not in addrs:
addrs.append([a])
addrs.insert(0, parallel_addrs)
fw_versions = {}
for i, addr in enumerate(tqdm(addrs, disable=not progress)):
for addr_chunk in chunks(addr):
for r in REQUESTS:
try:
addrs = [(a, s) for (b, a, s) in addr_chunk if b in (r.brand, 'any') and
(len(r.whitelist_ecus) == 0 or ecu_types[(a, s)] in r.whitelist_ecus)]
if addrs:
query = IsoTpParallelQuery(sendcan, logcan, r.bus, addrs, r.request, r.response, r.rx_offset, debug=debug)
t = 2 * timeout if i == 0 else timeout
fw_versions.update(query.get_data(t))
except Exception:
cloudlog.warning(f"FW query exception: {traceback.format_exc()}")
# Build capnp list to put into CarParams
car_fw = []
for addr, version in fw_versions.items():
f = car.CarParams.CarFw.new_message()
f.ecu = ecu_types[addr]
f.fwVersion = version
f.address = addr[0]
if addr[1] is not None:
f.subAddress = addr[1]
car_fw.append(f)
return car_fw
|
6,964 |
def create_energy_points_log(ref_doctype, ref_name, doc, apply_only_once=False):
doc = frappe._dict(doc)
log_exists = check_if_log_exists(ref_doctype,
ref_name, doc.rule, None if apply_only_once else doc.user)
if log_exists:
filters = frappe._dict({
'rule': doc.rule,
'reference_doctype': ref_doctype,
'reference_name': ref_name,
'reverted': 0
})
return frappe.get_doc('Energy Point Log', filters=filters)
new_log = frappe.new_doc('Energy Point Log')
new_log.reference_doctype = ref_doctype
new_log.reference_name = ref_name
new_log.update(doc)
new_log.insert(ignore_permissions=True)
return new_log
|
def create_energy_points_log(ref_doctype, ref_name, doc, apply_only_once=False):
doc = frappe._dict(doc)
log_exists = check_if_log_exists(ref_doctype,
ref_name, doc.rule, None if apply_only_once else doc.user)
if log_exists:
return frappe.get_doc('Energy Point Log', name=log_exists)
new_log = frappe.new_doc('Energy Point Log')
new_log.reference_doctype = ref_doctype
new_log.reference_name = ref_name
new_log.update(doc)
new_log.insert(ignore_permissions=True)
return new_log
|
57,694 |
def main():
try:
entry_context = {
"TroubleShout": {
'Engine': {
'SSL/TLS': docker_container_details()
},
'Endpoint': {
'SSL/TLS': endpoint_certificate("google.com", "443")
}
}
}
human_readable = build_human_readable(entry_context)
return_outputs(human_readable, entry_context, {})
except Exception as e:
return_error(f'Failed to execute Certificate Troubleshout.\n Error: {str(e)}')
|
def main():
try:
entry_context = {
"TroubleShout": {
'Engine': {
'SSL/TLS': docker_container_details(),
},
'Endpoint': {
'SSL/TLS': endpoint_certificate("google.com", "443"),
}
}
}
human_readable = build_human_readable(entry_context)
return_outputs(human_readable, entry_context, {})
except Exception as e:
return_error(f'Failed to execute Certificate Troubleshout.\n Error: {str(e)}')
|
31,463 |
def handle_url_addition_commands(client: Client, demisto_args: dict) -> CommandResults:
"""
Adds the urls to the inbound blacklisted list.
:type client: ``Client``
:param client: Client to use.
:type demisto_args: ``dict``
:param demisto_args: The demisto arguments.
:return: The command results which contains the added urls to the inbound blacklisted list.
:rtype: ``CommandResults``
"""
url = demisto_args.get('url')
if not url:
raise DemistoException(
"You must provide url in order to add it to the inbound blacklisted list.")
remove_nulls_from_dictionary(demisto_args)
demisto_args = camelize(demisto_args, "_", upper_camel=False)
demisto_args['url'] = ','.join(argToList(url))
raw_result = client.inbound_blacklisted_url_add_command(demisto_args)
urls_list = copy.deepcopy(raw_result.get('urls', [raw_result]))
msg = "Urls were successfully added to the inbound blacklisted list"
objects_time_to_readable_time(urls_list, 'updateTime')
readable_output = msg + '\n' + tableToMarkdown('Added Urls', urls_list,
headers=['url', 'pgid', 'cid', 'update_time', 'annotation'],
headerTransform=string_to_table_header, removeNull=True)
return CommandResults(
outputs_prefix="NetscoutAED.Inbound.Blacklist.Url",
outputs_key_field="url",
outputs=urls_list,
raw_response=raw_result,
readable_output=readable_output
)
|
def handle_url_addition_commands(client: Client, demisto_args: dict) -> CommandResults:
"""
Adds the urls to the inbound blacklisted list.
:type client: ``Client``
:param client: Client to use.
:type demisto_args: ``dict``
:param demisto_args: The demisto arguments.
:return: The command results which contains the added urls to the inbound blacklisted list.
:rtype: ``CommandResults``
"""
url = demisto_args.get('url')
if not url:
raise DemistoException(
"You must provide url in order to add it to the inbound blacklisted list.")
remove_nulls_from_dictionary(demisto_args)
demisto_args = camelize(demisto_args, "_", upper_camel=False)
demisto_args['url'] = ','.join(argToList(url))
raw_result = client.inbound_blacklisted_url_add_command(demisto_args)
urls_list = copy.deepcopy(raw_result.get('urls', [raw_result]))
msg = "Urls were successfully added to the inbound blacklisted list"
objects_time_to_readable_time(urls_list, 'updateTime')
readable_output = msg + '\n' + tableToMarkdown('Added Urls', urls_list,
headers=['url', 'pgid', 'cid', 'update_time', 'annotation'],
headerTransform=string_to_table_header, removeNull=True)
return CommandResults(
outputs_prefix="NetscoutAED.Inbound.Blacklist.Url",
outputs_key_field="url",
outputs=urls_list,
raw_response=raw_result,
readable_output=readable_output,
)
|
47,254 |
def init_hf_modules():
"""
Creates the cache directory for modules with an init, and adds it to the Python path.
"""
# This function has already been executed if HF_MODULES_CACH already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(HF_MODULES_CACHE)
os.makedirs(HF_MODULES_CACHE, exist_ok=True)
init_path = Path(HF_MODULES_CACHE) / "__init__.py"
if not init_path.exists():
init_path.touch()
|
def init_hf_modules():
"""
Creates the cache directory for modules with an init, and adds it to the Python path.
"""
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(HF_MODULES_CACHE)
os.makedirs(HF_MODULES_CACHE, exist_ok=True)
init_path = Path(HF_MODULES_CACHE) / "__init__.py"
if not init_path.exists():
init_path.touch()
|
35,428 |
def main(sm=None, pm=None):
gc.disable()
if sm is None:
sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveParameters'])
params_reader = Params()
# wait for stats about the car to come in from controls
cloudlog.info("paramsd is waiting for CarParams")
CP = car.CarParams.from_bytes(params_reader.get("CarParams", block=True))
cloudlog.info("paramsd got CarParams")
min_sr, max_sr = 0.5 * CP.steerRatio, 2.0 * CP.steerRatio
params = params_reader.get("LiveParameters")
# Check if car model matches
if params is not None:
params = json.loads(params)
if params.get('carFingerprint', None) != CP.carFingerprint:
cloudlog.info("Parameter learner found parameters for wrong car.")
params = None
# Check if starting values are sane
if params is not None:
try:
angle_offset_sane = abs(params.get('angleOffsetAverageDeg')) < 10.0
steer_ratio_sane = min_sr <= params['steerRatio'] <= max_sr
params_sane = angle_offset_sane and steer_ratio_sane
if params is not None and not params_sane:
cloudlog.info(f"Invalid starting values found {params}")
params = None
except Exception as e:
cloudlog.info(f"Error reading params {params}: {str(e)}")
params = None
# TODO: cache the params with the capnp struct
if params is None:
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': CP.steerRatio,
'stiffnessFactor': 1.0,
'angleOffsetAverageDeg': 0.0,
}
cloudlog.info("Parameter learner resetting to default values")
# When driving in wet conditions the stiffness can go down, and then be too low on the next drive
# Without a way to detect this we have to reset the stiffness every drive
params['stiffnessFactor'] = 1.0
learner = ParamsLearner(CP, params['steerRatio'], params['stiffnessFactor'], math.radians(params['angleOffsetAverageDeg']))
while True:
sm.update()
for which, updated in sm.updated.items():
if updated:
t = sm.logMonoTime[which] * 1e-9
learner.handle_log(t, which, sm[which])
if sm.updated['liveLocationKalman']:
x = learner.kf.x
if not all(map(math.isfinite, x)):
cloudlog.error("NaN in liveParameters estimate. Resetting to default values")
learner = ParamsLearner(CP, CP.steerRatio, 1.0, 0.0)
x = learner.kf.x
msg = messaging.new_message('liveParameters')
msg.logMonoTime = sm.logMonoTime['carState']
msg.liveParameters.posenetValid = True
msg.liveParameters.sensorValid = True
msg.liveParameters.steerRatio = float(x[States.STEER_RATIO])
msg.liveParameters.stiffnessFactor = float(x[States.STIFFNESS])
msg.liveParameters.angleOffsetAverageDeg = math.degrees(x[States.ANGLE_OFFSET])
msg.liveParameters.angleOffsetDeg = msg.liveParameters.angleOffsetAverageDeg + math.degrees(x[States.ANGLE_OFFSET_FAST])
msg.liveParameters.valid = all((
abs(msg.liveParameters.angleOffsetAverageDeg) < 10.0,
abs(msg.liveParameters.angleOffsetDeg) < 10.0,
0.2 <= msg.liveParameters.stiffnessFactor <= 5.0,
min_sr <= msg.liveParameters.steerRatio <= max_sr,
))
if sm.frame % 1200 == 0: # once a minute
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': msg.liveParameters.steerRatio,
'stiffnessFactor': msg.liveParameters.stiffnessFactor,
'angleOffsetAverageDeg': msg.liveParameters.angleOffsetAverageDeg,
}
put_nonblocking("LiveParameters", json.dumps(params))
pm.send('liveParameters', msg)
|
def main(sm=None, pm=None):
gc.disable()
if sm is None:
sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveParameters'])
params_reader = Params()
# wait for stats about the car to come in from controls
cloudlog.info("paramsd is waiting for CarParams")
CP = car.CarParams.from_bytes(params_reader.get("CarParams", block=True))
cloudlog.info("paramsd got CarParams")
min_sr, max_sr = 0.5 * CP.steerRatio, 2.0 * CP.steerRatio
params = params_reader.get("LiveParameters")
# Check if car model matches
if params is not None:
params = json.loads(params)
if params.get('carFingerprint', None) != CP.carFingerprint:
cloudlog.info("Parameter learner found parameters for wrong car.")
params = None
# Check if starting values are sane
if params is not None:
try:
angle_offset_sane = abs(params.get('angleOffsetAverageDeg')) < 10.0
steer_ratio_sane = min_sr <= params['steerRatio'] <= max_sr
params_sane = angle_offset_sane and steer_ratio_sane
if not params_sane:
cloudlog.info(f"Invalid starting values found {params}")
params = None
except Exception as e:
cloudlog.info(f"Error reading params {params}: {str(e)}")
params = None
# TODO: cache the params with the capnp struct
if params is None:
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': CP.steerRatio,
'stiffnessFactor': 1.0,
'angleOffsetAverageDeg': 0.0,
}
cloudlog.info("Parameter learner resetting to default values")
# When driving in wet conditions the stiffness can go down, and then be too low on the next drive
# Without a way to detect this we have to reset the stiffness every drive
params['stiffnessFactor'] = 1.0
learner = ParamsLearner(CP, params['steerRatio'], params['stiffnessFactor'], math.radians(params['angleOffsetAverageDeg']))
while True:
sm.update()
for which, updated in sm.updated.items():
if updated:
t = sm.logMonoTime[which] * 1e-9
learner.handle_log(t, which, sm[which])
if sm.updated['liveLocationKalman']:
x = learner.kf.x
if not all(map(math.isfinite, x)):
cloudlog.error("NaN in liveParameters estimate. Resetting to default values")
learner = ParamsLearner(CP, CP.steerRatio, 1.0, 0.0)
x = learner.kf.x
msg = messaging.new_message('liveParameters')
msg.logMonoTime = sm.logMonoTime['carState']
msg.liveParameters.posenetValid = True
msg.liveParameters.sensorValid = True
msg.liveParameters.steerRatio = float(x[States.STEER_RATIO])
msg.liveParameters.stiffnessFactor = float(x[States.STIFFNESS])
msg.liveParameters.angleOffsetAverageDeg = math.degrees(x[States.ANGLE_OFFSET])
msg.liveParameters.angleOffsetDeg = msg.liveParameters.angleOffsetAverageDeg + math.degrees(x[States.ANGLE_OFFSET_FAST])
msg.liveParameters.valid = all((
abs(msg.liveParameters.angleOffsetAverageDeg) < 10.0,
abs(msg.liveParameters.angleOffsetDeg) < 10.0,
0.2 <= msg.liveParameters.stiffnessFactor <= 5.0,
min_sr <= msg.liveParameters.steerRatio <= max_sr,
))
if sm.frame % 1200 == 0: # once a minute
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': msg.liveParameters.steerRatio,
'stiffnessFactor': msg.liveParameters.stiffnessFactor,
'angleOffsetAverageDeg': msg.liveParameters.angleOffsetAverageDeg,
}
put_nonblocking("LiveParameters", json.dumps(params))
pm.send('liveParameters', msg)
|
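The pair above (openpilot's paramsd main loop) guards the Kalman state against non-finite values and persists the learned parameters once a minute. Below is a minimal sketch of that guard-and-persist pattern with the messaging stack stripped out; publish_or_reset, the in-memory store dict, and the 1200-frame period are illustrative assumptions, not part of the original code.
import json
import math

def publish_or_reset(state, defaults, store, frame, period=1200):
    # Reset to known-good defaults if any state entry is NaN/inf,
    # and persist the current values once every `period` frames.
    if not all(map(math.isfinite, state)):
        state = list(defaults)
    if frame % period == 0:
        store["LiveParameters"] = json.dumps(state)
    return state

state = [float("nan"), 1.0, 0.0]
state = publish_or_reset(state, [15.3, 1.0, 0.0], store={}, frame=0)
assert state == [15.3, 1.0, 0.0]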
24,822 |
def is_classdef_type(node: nodes.ClassDef) -> bool:
"""Test if ClassDef node is Type."""
if node.name == "type":
return True
return any(
isinstance(base, nodes.Name) and base.name == "type" for base in node.bases
)
|
def is_classdef_type(node: nodes.ClassDef) -> bool:
"""Test if ClassDef node is Type."""
if node.name == "type":
return True
return any(isinstance(b, nodes.Name) and b.name == "type" for b in node.bases)
|
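Row 24,822 only compacts the generator expression inside any(). For readers unfamiliar with the idiom, here is the same base-class check written against plain Python classes instead of astroid nodes; this is an illustration, not pylint's code.
def inherits_from_type(cls) -> bool:
    # Same shape as is_classdef_type: match `type` itself or any direct base.
    return cls is type or any(base is type for base in cls.__bases__)

class Meta(type):
    pass

assert inherits_from_type(type)
assert inherits_from_type(Meta)
assert not inherits_from_type(object)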
31,396 |
def cisco_stealthwatch_list_security_events_status_command(client: Client, tenant_id: str,
search_id: str) -> CommandResults:
"""Retrieve the status of the security events process using search id
Args:
client (Client): Cisco Stealthwatch Client
tenant_id (str): The id of the tenant of the security events process
search_id (str): The id of the search.
Returns:
CommandResults: Raw response, outputs and readable outputs
"""
response = client.check_security_events_search_progress(tenant_id, search_id)
outputs = response.get('data')
outputs['id'] = search_id
table = tableToMarkdown('Security Events Status Information:', outputs,
headers=['id', 'percentComplete'], removeNull=True)
return CommandResults(
outputs_prefix='CiscoStealthwatch.SecurityEventStatus',
outputs_key_field='id',
raw_response=response,
outputs=outputs,
readable_output=table)
|
def cisco_stealthwatch_list_security_events_status_command(client: Client, tenant_id: str,
search_id: str) -> CommandResults:
"""Retrieve the status of the security events process using search id
Args:
client (Client): Cisco Stealthwatch Client
tenant_id (str): The id of the tenant of the security events process
search_id (str): The id of the search.
Returns:
CommandResults: Raw response, outputs and readable outputs
"""
response = client.check_security_events_search_progress(tenant_id, search_id)
outputs = response.get('data', {})
outputs['id'] = search_id
table = tableToMarkdown('Security Events Status Information:', outputs,
headers=['id', 'percentComplete'], removeNull=True)
return CommandResults(
outputs_prefix='CiscoStealthwatch.SecurityEventStatus',
outputs_key_field='id',
raw_response=response,
outputs=outputs,
readable_output=table)
|
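The only behavioural change in row 31,396 is the default on response.get('data', {}), which keeps outputs['id'] = search_id from raising when the payload has no data key. A small illustration (the response value is made up):
response = {}                          # API payload without a 'data' key
outputs = response.get('data')         # None -> outputs['id'] = ... would raise TypeError
outputs = response.get('data', {})     # {}   -> safe to add keys
outputs['id'] = 'search-1'
# If the key can exist with an explicit None value, `response.get('data') or {}` covers that case too.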
30,981 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
# get the service API url
base_url = params.get('url')
# checks for '/' at the end url, if it is not available add it
if base_url[-1] != '/':
base_url += '/'
# Resetting global variable
global CUSTOM_MAPPING_CREATE
CUSTOM_MAPPING_CREATE = demisto.params().get('customMappingCreateUser')
global CUSTOM_MAPPING_UPDATE
CUSTOM_MAPPING_UPDATE = demisto.params().get('customMappingUpdateUser')
auth_token = params.get('authorization_token')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
LOG(f'Command being called is {command}')
commands = {
'test-module': test_module,
'get-user': get_user_command,
'create-user': create_user_command,
'update-user': update_user_command,
'disable-user': enable_disable_user_command,
'enable-user': enable_disable_user_command
}
try:
client = Client(
base_url=base_url,
auth_token=auth_token,
verify=verify_certificate,
headers={'Content-Type': 'application/json'},
proxy=proxy)
if command in commands:
human_readable, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response)
# Log exceptions
except Exception:
return_error(f'Failed to execute {demisto.command()} command. Traceback: {traceback.format_exc()}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
# get the service API url
base_url = params.get('url')
# checks for '/' at the end url, if it is not available add it
if base_url[-1] != '/':
base_url += '/'
# Resetting global variable
global CUSTOM_MAPPING_CREATE
CUSTOM_MAPPING_CREATE = demisto.params().get('customMappingCreateUser')
global CUSTOM_MAPPING_UPDATE
CUSTOM_MAPPING_UPDATE = demisto.params().get('customMappingUpdateUser')
auth_token = params.get('authorization_token')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.info(f'Command being called is {command}')
commands = {
'test-module': test_module,
'get-user': get_user_command,
'create-user': create_user_command,
'update-user': update_user_command,
'disable-user': enable_disable_user_command,
'enable-user': enable_disable_user_command
}
try:
client = Client(
base_url=base_url,
auth_token=auth_token,
verify=verify_certificate,
headers={'Content-Type': 'application/json'},
proxy=proxy)
if command in commands:
human_readable, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response)
# Log exceptions
except Exception:
return_error(f'Failed to execute {demisto.command()} command. Traceback: {traceback.format_exc()}')
|
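Row 30,981 swaps LOG(...) for demisto.info(...); the surrounding structure is a plain dict-based command dispatcher. A framework-free sketch of that pattern (all names here are illustrative):
def dispatch(command, args, handlers):
    # Look up the handler by command name; unknown commands fail loudly.
    try:
        handler = handlers[command]
    except KeyError:
        raise ValueError(f"unknown command: {command}") from None
    return handler(args)

handlers = {
    "echo": lambda args: args,
    "upper": lambda args: args.upper(),
}
assert dispatch("upper", "ok", handlers) == "OK"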
32,539 |
def convert_events_with_datetime_to_str(events: list) -> list:
"""Convert datetime fields in events to string.
Args:
events (list): Events received from AWS python SDK with datetime in certain fields.
Returns:
events (list): Events with dates as strings only.
"""
for event in events:
if event.get('Resource'):
resource = event.get('Resource', {})
service = event.get('Service', {})
if resource.get('S3BucketDetails'):
s3bucket_details = resource.get('S3BucketDetails')
if s3bucket_details and type(s3bucket_details) == list:
for s3buckt_detail in s3bucket_details:
if type(s3buckt_detail.get('CreatedAt')) == datetime:
s3buckt_detail['CreatedAt'] = s3buckt_detail['CreatedAt'].__str__()
if type(resource.get('EksClusterDetails', {}).get('CreatedAt')) == datetime:
resource['EksClusterDetails']['CreatedAt'] = resource['EksClusterDetails']['CreatedAt'].__str__()
if type(resource.get('EcsClusterDetails', {}).get('TaskDetails', {}).get('TaskCreatedAt')) == datetime:
resource['EcsClusterDetails']['TaskDetails']['TaskCreatedAt'] = \
resource['EcsClusterDetails']['TaskDetails']['TaskCreatedAt'].__str__()
if type(resource.get('EcsClusterDetails', {}).get('TaskDetails', {}).get('StartedAt')) == datetime:
resource['EcsClusterDetails']['TaskDetails']['StartedAt'] = \
resource['EcsClusterDetails']['TaskDetails']['StartedAt'].__str__()
if type(service.get('EbsVolumeScanDetails', {}).get('ScanStartedAt')) == datetime:
service['EbsVolumeScanDetails']['ScanStartedAt'] = \
service['EbsVolumeScanDetails']['ScanStartedAt'].__str__()
if type(service.get('EbsVolumeScanDetails', {}).get('ScanCompletedAt')) == datetime:
service['EbsVolumeScanDetails']['ScanCompletedAt'] = \
service['EbsVolumeScanDetails']['ScanCompletedAt'].__str__()
return events
|
def convert_events_with_datetime_to_str(events: list) -> list:
"""Convert datetime fields in events to string.
Args:
events (list): Events received from AWS python SDK with datetime in certain fields.
Returns:
events (list): Events with dates as strings only.
"""
for event in events:
if resource := event.get('Resource', {}):
service = event.get('Service', {})
if resource.get('S3BucketDetails'):
s3bucket_details = resource.get('S3BucketDetails')
if s3bucket_details and type(s3bucket_details) == list:
for s3buckt_detail in s3bucket_details:
if type(s3buckt_detail.get('CreatedAt')) == datetime:
s3buckt_detail['CreatedAt'] = s3buckt_detail['CreatedAt'].__str__()
if type(resource.get('EksClusterDetails', {}).get('CreatedAt')) == datetime:
resource['EksClusterDetails']['CreatedAt'] = resource['EksClusterDetails']['CreatedAt'].__str__()
if type(resource.get('EcsClusterDetails', {}).get('TaskDetails', {}).get('TaskCreatedAt')) == datetime:
resource['EcsClusterDetails']['TaskDetails']['TaskCreatedAt'] = \
resource['EcsClusterDetails']['TaskDetails']['TaskCreatedAt'].__str__()
if type(resource.get('EcsClusterDetails', {}).get('TaskDetails', {}).get('StartedAt')) == datetime:
resource['EcsClusterDetails']['TaskDetails']['StartedAt'] = \
resource['EcsClusterDetails']['TaskDetails']['StartedAt'].__str__()
if type(service.get('EbsVolumeScanDetails', {}).get('ScanStartedAt')) == datetime:
service['EbsVolumeScanDetails']['ScanStartedAt'] = \
service['EbsVolumeScanDetails']['ScanStartedAt'].__str__()
if type(service.get('EbsVolumeScanDetails', {}).get('ScanCompletedAt')) == datetime:
service['EbsVolumeScanDetails']['ScanCompletedAt'] = \
service['EbsVolumeScanDetails']['ScanCompletedAt'].__str__()
return events
|
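Row 32,539 replaces the "test, then fetch again" pair with an assignment expression. A short demonstration of the idiom (Python 3.8+); the sample event dict is made up:
event = {"Resource": {"S3BucketDetails": []}, "Service": {}}

# Pre-3.8: test, then fetch again with a default.
if event.get("Resource"):
    resource = event.get("Resource", {})

# 3.8+: bind and test in one step with the walrus operator.
if resource := event.get("Resource", {}):
    assert "S3BucketDetails" in resource
Separately, isinstance(value, datetime) is usually preferred over type(value) == datetime, but the pair above leaves that part unchanged.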
44,011 |
def cast(tensor, dtype):
"""Casts the given tensor to a new type.
Args:
tensor (tensor_like): tensor to cast
dtype (str, np.dtype): Any supported NumPy dtype representation; this can be
a string (``"float64"``), a ``np.dtype`` object (``np.dtype("float64")``), or
a dtype class (``np.float64``). If ``tensor`` is not a NumPy array, the
**equivalent** dtype in the dispatched framework is used.
Returns:
tensor_like: a tensor with the same shape and values as ``tensor`` and the
same dtype as ``dtype``
**Example**
We can use NumPy dtype specifiers:
>>> x = torch.tensor([1, 2])
>>> cast(x, np.float64)
tensor([1., 2.], dtype=torch.float64)
We can also use strings:
>>> x = tf.Variable([1, 2])
>>> cast(x, "complex128")
<tf.Tensor: shape=(2,), dtype=complex128, numpy=array([1.+0.j, 2.+0.j])>
"""
if isinstance(tensor, (list, tuple)):
tensor = np.asarray(tensor)
if not isinstance(dtype, str):
try:
dtype = np.dtype(dtype).name
except (AttributeError, TypeError):
dtype = getattr(dtype, "name", dtype)
return ar.astype(tensor, ar.to_backend_dtype(dtype, like=ar.infer_backend(tensor)))
|
def cast(tensor, dtype):
"""Casts the given tensor to a new type.
Args:
tensor (tensor_like): tensor to cast
dtype (str, np.dtype): Any supported NumPy dtype representation; this can be
a string (``"float64"``), a ``np.dtype`` object (``np.dtype("float64")``), or
a dtype class (``np.float64``). If ``tensor`` is not a NumPy array, the
**equivalent** dtype in the dispatched framework is used.
Returns:
tensor_like: a tensor with the same shape and values as ``tensor`` and the
same dtype as ``dtype``
**Example**
We can use NumPy dtype specifiers:
>>> x = torch.tensor([1, 2])
>>> cast(x, np.float64)
tensor([1., 2.], dtype=torch.float64)
We can also use strings:
>>> x = tf.Variable([1, 2])
>>> cast(x, "complex128")
<tf.Tensor: shape=(2,), dtype=complex128, numpy=array([1.+0.j, 2.+0.j])>
"""
if isinstance(tensor, (list, tuple)):
tensor = np.asarray(tensor)
if not isinstance(dtype, str):
try:
dtype = np.dtype(dtype).name
except (AttributeError, TypeError):
dtype = getattr(dtype, "name", dtype)
return ar.astype(tensor, ar.to_backend_dtype(dtype, like=ar.infer_backend(tensor)))
|
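The core of cast in row 44,011 is normalising any dtype spec (string, np.dtype, or dtype class) to a NumPy dtype name before dispatching through autoray. That step can be exercised on its own, without autoray; dtype_name is an illustrative helper, not PennyLane API.
import numpy as np

def dtype_name(dtype) -> str:
    # Accept "float64", np.dtype("float64") or np.float64 and return the canonical name.
    if isinstance(dtype, str):
        return dtype
    try:
        return np.dtype(dtype).name
    except (AttributeError, TypeError):
        return getattr(dtype, "name", dtype)

assert dtype_name("complex128") == "complex128"
assert dtype_name(np.dtype("int32")) == "int32"
assert dtype_name(np.float64) == "float64"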
28,158 |
def test_basic_extraction(two_empty_temp_db_connections, some_paramspecs):
source_conn, target_conn = two_empty_temp_db_connections
source_path = path_to_dbfile(source_conn)
target_path = path_to_dbfile(target_conn)
type_casters = {'numeric': float,
'array': (lambda x: np.array(x) if hasattr(x, '__iter__')
else np.array([x])),
'text': str}
source_exp = Experiment(conn=source_conn)
source_dataset = DataSet(conn=source_conn, name="basic_copy_paste_name")
with pytest.raises(RuntimeError) as excinfo:
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
assert error_caused_by(excinfo, ('Dataset not completed. An incomplete '
'dataset can not be copied.'))
for ps in some_paramspecs[1].values():
source_dataset.add_parameter(ps)
for value in range(10):
result = {ps.name: type_casters[ps.type](value)
for ps in some_paramspecs[1].values()}
source_dataset.add_result(result)
source_dataset.add_metadata('goodness', 'fair')
source_dataset.add_metadata('test', True)
source_dataset.mark_complete()
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
target_exp = Experiment(conn=target_conn, exp_id=1)
length1 = len(target_exp)
# trying to insert the same run again should be a NOOP
with raise_if_file_changed(target_path):
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
assert len(target_exp) == length1
target_dataset = DataSet(conn=source_conn, run_id=1)
# Now make the interesting comparisons: are the target objects the same as
# the source objects?
assert source_dataset.the_same_dataset_as(target_dataset)
source_data = source_dataset.get_data(*source_dataset.parameters.split(','))
target_data = target_dataset.get_data(*target_dataset.parameters.split(','))
assert source_data == target_data
exp_attrs = ['name', 'sample_name', 'format_string', 'started_at',
'finished_at']
for exp_attr in exp_attrs:
assert getattr(source_exp, exp_attr) == getattr(target_exp, exp_attr)
# trying to insert the same run again should be a NOOP
with raise_if_file_changed(target_path):
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
|
def test_basic_extraction(two_empty_temp_db_connections, some_paramspecs):
source_conn, target_conn = two_empty_temp_db_connections
source_path = path_to_dbfile(source_conn)
target_path = path_to_dbfile(target_conn)
type_casters = {'numeric': float,
'array': (lambda x: np.array(x) if hasattr(x, '__iter__')
else np.array([x])),
'text': str}
source_exp = Experiment(conn=source_conn)
source_dataset = DataSet(conn=source_conn, name="basic_copy_paste_name")
with pytest.raises(RuntimeError) as excinfo:
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
assert error_caused_by(excinfo, ('Dataset not completed. An incomplete '
'dataset can not be copied.'))
for ps in some_paramspecs[1].values():
source_dataset.add_parameter(ps)
for value in range(10):
result = {ps.name: type_casters[ps.type](value)
for ps in some_paramspecs[1].values()}
source_dataset.add_result(result)
source_dataset.add_metadata('goodness', 'fair')
source_dataset.add_metadata('test', True)
source_dataset.mark_complete()
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
target_exp = Experiment(conn=target_conn, exp_id=1)
length1 = len(target_exp)
# trying to insert the same run again should be a NOOP
with raise_if_file_changed(target_path):
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
assert len(target_exp) == length1
target_dataset = DataSet(conn=target_conn, run_id=1)
# Now make the interesting comparisons: are the target objects the same as
# the source objects?
assert source_dataset.the_same_dataset_as(target_dataset)
source_data = source_dataset.get_data(*source_dataset.parameters.split(','))
target_data = target_dataset.get_data(*target_dataset.parameters.split(','))
assert source_data == target_data
exp_attrs = ['name', 'sample_name', 'format_string', 'started_at',
'finished_at']
for exp_attr in exp_attrs:
assert getattr(source_exp, exp_attr) == getattr(target_exp, exp_attr)
# trying to insert the same run again should be a NOOP
with raise_if_file_changed(target_path):
extract_runs_into_db(source_path, target_path, source_dataset.run_id)
|
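The fix in row 28,158 is the single conn=target_conn argument, so the round-trip comparison actually reads back from the target database. The test also leans on a raise_if_file_changed guard; a hypothetical stand-in for such a guard (not QCoDeS' implementation) could look like this:
import os
from contextlib import contextmanager

@contextmanager
def raise_if_file_changed_sketch(path):
    # Fail if the file's size or mtime changed while the block ran.
    before = os.stat(path)
    yield
    after = os.stat(path)
    if (before.st_size, before.st_mtime_ns) != (after.st_size, after.st_mtime_ns):
        raise AssertionError(f"{path} was modified")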
20,646 |
def test_inherit_overwrite():
"""On 3.6+ we check things are overwritten at import time"""
with pytest.raises(NotImplementedError):
class S(Spawner):
pass
|
def test_inherit_overwrite():
"""We check things are overwritten at import time"""
with pytest.raises(NotImplementedError):
class S(Spawner):
pass
|
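Row 20,646 only updates the docstring: the test expects that merely subclassing Spawner without overriding required methods raises NotImplementedError at class-definition time. One way to enforce that (not necessarily JupyterHub's implementation) is __init_subclass__; Base and _required below are illustrative.
class Base:
    _required = ("start", "stop")

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Reject subclasses that leave a required method untouched.
        for name in cls._required:
            if getattr(cls, name) is getattr(Base, name):
                raise NotImplementedError(f"{cls.__name__} must override {name}()")

    def start(self):
        raise NotImplementedError

    def stop(self):
        raise NotImplementedError

try:
    class S(Base):      # mirrors the test above
        pass
except NotImplementedError:
    pass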
32,160 |
def list_labels(user_key):
command_args = {
'userKey': user_key,
}
service = get_service(
'gmail',
'v1',
['https://www.googleapis.com/auth/gmail.readonly'])
results = service.users().labels().list(userId=user_key).execute()
labels = results.get('labels', [])
return labels
|
def list_labels(user_key):
command_args = {
'userKey': user_key,
}
service = get_service(
'gmail',
'v1',
['https://www.googleapis.com/auth/gmail.readonly'])
results = service.users().labels().list(userId=user_key).execute()
labels = results.get('labels') or []
return labels
|
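Row 32,160 changes results.get('labels', []) to results.get('labels') or []. The difference only matters when the key exists but holds None (or another falsy value):
results = {"labels": None}
assert results.get("labels", []) is None       # default unused: the key exists
assert (results.get("labels") or []) == []     # falsy value coerced to an empty list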
34,421 |
def determine_entity_for_token(
token: Token,
entities: List[Dict[Text, Any]],
extractors: Optional[Set[Text]] = None,
) -> Optional[Dict[Text, Any]]:
"""
Determines the best fitting entity for the given token given entities that do
not overlap.
Args:
token: a single token
entities: entities found by a single extractor
extractors: list of extractors
Returns:
entity type
"""
if entities is None or len(entities) == 0:
return None
if not do_extractors_support_overlap(extractors) and do_entities_overlap(entities):
raise ValueError("The possible entities should not overlap.")
candidates = find_intersecting_entities(token, entities)
return pick_best_entity_fit(token, candidates)
|
def determine_entity_for_token(
token: Token,
entities: List[Dict[Text, Any]],
extractors: Optional[Set[Text]] = None,
) -> Optional[Dict[Text, Any]]:
"""
Determines the best fitting entity for the given token, given entities that do
not overlap.
Args:
token: a single token
entities: entities found by a single extractor
extractors: list of extractors
Returns:
entity type
"""
if entities is None or len(entities) == 0:
return None
if not do_extractors_support_overlap(extractors) and do_entities_overlap(entities):
raise ValueError("The possible entities should not overlap.")
candidates = find_intersecting_entities(token, entities)
return pick_best_entity_fit(token, candidates)
|
27,715 |
def validate_basetemp(path):
# GH 7119
cwd = pathlib.Path.cwd()
if path == "" or path == "." or str(cwd).startswith(path):
msg = "basetemp should not be '' or . or any parent folder of the cwd"
raise argparse.ArgumentTypeError(msg)
return path
|
def validate_basetemp(path):
# GH 7119
cwd = pathlib.Path.cwd()
if path == "" or path == "." or str(cwd).startswith(path):
msg = "basetemp must not be empty, the current working directory or any parent directory of it"
raise argparse.ArgumentTypeError(msg)
return path
|
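Row 27,715 only rewords the error message; validate_basetemp is an argparse type= validator. A usage sketch showing how argparse invokes such a validator (it reuses the function defined in the row above, assuming pathlib is imported alongside it):
import argparse

parser = argparse.ArgumentParser()
# argparse calls the validator on the raw string; ArgumentTypeError
# surfaces as a normal usage error.
parser.add_argument("--basetemp", type=validate_basetemp)
args = parser.parse_args(["--basetemp", "/tmp/pytest-run"])
print(args.basetemp)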
45,653 |
def parse_contents(contents, filename, _):
_, content_string = contents.split(',')
decoded = base64.b64decode(content_string).decode('UTF-8')
answer = None
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded))
df = df.to_dict(orient='records')
answer = df
except Exception as e:
answer = html.Div(['There was an error processing this file.'])
print(e)
return answer
|
def parse_contents(contents, filename, _):
_, content_string = contents.split(',')
decoded = base64.b64decode(content_string).decode('UTF-8')
answer = None
try:
if filename.endswith('.csv'):
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded))
df = df.to_dict(orient='records')
answer = df
except Exception as e:
answer = html.Div(['There was an error processing this file.'])
print(e)
return answer
|
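The change in row 45,653 tightens the CSV check: 'csv' in filename matches any name containing the substring, while filename.endswith('.csv') only matches the extension. For example:
for name in ("report.csv", "csv_notes.txt", "archive.csv.gz"):
    print(f"{name:15s} substring={'csv' in name}  suffix={name.endswith('.csv')}")
# report.csv      substring=True  suffix=True
# csv_notes.txt   substring=True  suffix=False
# archive.csv.gz  substring=True  suffix=False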
43,127 |
def clone(repo_dir, url="https://hg.mozilla.org/mozilla-central"):
if os.path.exists(repo_dir):
clean(repo_dir)
return
cmd = hglib.util.cmdbuilder(
"robustcheckout",
url,
repo_dir,
purge=True,
sharebase=repo_dir + "-shared",
networkattempts=7,
branch=b"tip",
)
cmd.insert(0, hglib.HGPATH)
proc = hglib.util.popen(cmd)
out, err = proc.communicate()
if proc.returncode:
raise hglib.error.CommandError(cmd, proc.returncode, out, err)
logger.info(f"{url} cloned")
# Remove pushlog DB to make sure it's regenerated.
try:
os.remove(os.path.join(repo_dir, ".hg", "pushlog2.db"))
except FileNotFoundError:
logger.info("pushlog database doesn't exist")
# Pull and update, to make sure the pushlog is generated.
clean(repo_dir)
|
def clone(repo_dir, url="https://hg.mozilla.org/mozilla-central"):
if os.path.exists(repo_dir):
clean(repo_dir)
return
cmd = hglib.util.cmdbuilder(
"robustcheckout",
url,
repo_dir,
purge=True,
sharebase=repo_dir + "-shared",
networkattempts=7,
branch=b"tip",
)
cmd.insert(0, hglib.HGPATH)
proc = hglib.util.popen(cmd)
out, err = proc.communicate()
if proc.returncode:
raise hglib.error.CommandError(cmd, proc.returncode, out, err)
logger.info(f"{repo_dir} cloned")
# Remove pushlog DB to make sure it's regenerated.
try:
os.remove(os.path.join(repo_dir, ".hg", "pushlog2.db"))
except FileNotFoundError:
logger.info("pushlog database doesn't exist")
# Pull and update, to make sure the pushlog is generated.
clean(repo_dir)
|
31,124 |
def main():
os.environ['PAN_CREDENTIALS_DBFILE'] = os.path.join(gettempdir(), 'pancloud_credentials.json')
params = demisto.params()
registration_id_and_url = params.get(REGISTRATION_ID_CONST).split('@')
if len(registration_id_and_url) != 2:
token_retrieval_url = "https://oproxy.demisto.ninja" # guardrails-disable-line
else:
token_retrieval_url = registration_id_and_url[1]
registration_id = registration_id_and_url[0]
# If there's a stored token in integration context, it's newer than current
refresh_token = demisto.getIntegrationContext().get(REFRESH_TOKEN_CONST) or params.get(REFRESH_TOKEN_CONST)
enc_key = params.get(ENCRYPTION_KEY_CONST)
use_ssl = not params.get('insecure', False)
proxy = params.get('proxy', False)
args = demisto.args()
fetch_table = params.get('fetch_table')
fetch_fields = params.get('fetch_fields') or '*'
command = demisto.command()
LOG(f'command is {command}')
# needs to be executed before creating a Client
if command == 'cdl-reset-failure-times':
Client.reset_failure_times()
return_outputs(readable_output="Failure time counters have been successfully reset.")
return
client = Client(token_retrieval_url, registration_id, use_ssl, proxy, refresh_token, enc_key)
try:
if command == 'test-module':
test_module(client, fetch_table, fetch_fields, params.get('isFetch'))
elif command == 'cdl-query-logs':
return_outputs(*query_logs_command(args, client))
elif command == 'cdl-get-critical-threat-logs':
return_outputs(*get_critical_logs_command(args, client))
elif command == 'cdl-get-social-applications':
return_outputs(*get_social_applications_command(args, client))
elif command == 'cdl-search-by-file-hash':
return_outputs(*search_by_file_hash_command(args, client))
elif command == 'cdl-query-traffic-logs':
return_outputs(*query_traffic_logs_command(args, client))
elif command == 'cdl-query-threat-logs':
return_outputs(*query_threat_logs_command(args, client))
elif command == 'cdl-query-url-logs':
return_outputs(*query_url_logs_command(args, client))
elif command == 'cdl-query-file-data':
return_outputs(*query_file_data_command(args, client))
elif command == 'fetch-incidents':
first_fetch_timestamp = params.get('first_fetch_timestamp', '24 hours').strip()
fetch_severity = params.get('firewall_severity')
fetch_table = params.get('fetch_table')
fetch_fields = params.get('fetch_fields') or '*'
fetch_subtype = params.get('firewall_subtype')
fetch_limit = params.get('limit')
last_run = demisto.getLastRun()
next_run, incidents = fetch_incidents(client,
first_fetch_timestamp,
fetch_severity,
fetch_table,
fetch_subtype,
fetch_fields,
fetch_limit,
last_run)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
except Exception as e:
error_message = str(e)
return_error(error_message)
|
def main():
os.environ['PAN_CREDENTIALS_DBFILE'] = os.path.join(gettempdir(), 'pancloud_credentials.json')
params = demisto.params()
registration_id_and_url = params.get(REGISTRATION_ID_CONST).split('@')
if len(registration_id_and_url) != 2:
token_retrieval_url = "https://oproxy.demisto.ninja" # guardrails-disable-line
else:
token_retrieval_url = registration_id_and_url[1]
registration_id = registration_id_and_url[0]
# If there's a stored token in integration context, it's newer than current
refresh_token = demisto.getIntegrationContext().get(REFRESH_TOKEN_CONST) or params.get(REFRESH_TOKEN_CONST)
enc_key = params.get(ENCRYPTION_KEY_CONST)
use_ssl = not params.get('insecure', False)
proxy = params.get('proxy', False)
args = demisto.args()
fetch_table = params.get('fetch_table')
fetch_fields = params.get('fetch_fields') or '*'
command = demisto.command()
LOG(f'command is {command}')
# needs to be executed before creating a Client
if command == 'cdl-reset-authentication-timeout':
Client.reset_failure_times()
return_outputs(readable_output="Failure time counters have been successfully reset.")
return
client = Client(token_retrieval_url, registration_id, use_ssl, proxy, refresh_token, enc_key)
try:
if command == 'test-module':
test_module(client, fetch_table, fetch_fields, params.get('isFetch'))
elif command == 'cdl-query-logs':
return_outputs(*query_logs_command(args, client))
elif command == 'cdl-get-critical-threat-logs':
return_outputs(*get_critical_logs_command(args, client))
elif command == 'cdl-get-social-applications':
return_outputs(*get_social_applications_command(args, client))
elif command == 'cdl-search-by-file-hash':
return_outputs(*search_by_file_hash_command(args, client))
elif command == 'cdl-query-traffic-logs':
return_outputs(*query_traffic_logs_command(args, client))
elif command == 'cdl-query-threat-logs':
return_outputs(*query_threat_logs_command(args, client))
elif command == 'cdl-query-url-logs':
return_outputs(*query_url_logs_command(args, client))
elif command == 'cdl-query-file-data':
return_outputs(*query_file_data_command(args, client))
elif command == 'fetch-incidents':
first_fetch_timestamp = params.get('first_fetch_timestamp', '24 hours').strip()
fetch_severity = params.get('firewall_severity')
fetch_table = params.get('fetch_table')
fetch_fields = params.get('fetch_fields') or '*'
fetch_subtype = params.get('firewall_subtype')
fetch_limit = params.get('limit')
last_run = demisto.getLastRun()
next_run, incidents = fetch_incidents(client,
first_fetch_timestamp,
fetch_severity,
fetch_table,
fetch_subtype,
fetch_fields,
fetch_limit,
last_run)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
except Exception as e:
error_message = str(e)
return_error(error_message)
|
51,463 |
def _inject_docstring(cls, cls_name):
cls.sum_of_weights.__doc__ = _SUM_OF_WEIGHTS_DOCSTRING.format(cls=cls_name)
cls.sum.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="sum", on_zero="0"
)
cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="mean", on_zero="NaN"
)
cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="mean", on_zero="NaN"
)
cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="sum_of_squares", on_zero="0"
)
cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="var", on_zero="NaN"
)
cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="std", on_zero="NaN"
)
|
def _inject_docstring(cls, cls_name):
cls.sum_of_weights.__doc__ = _SUM_OF_WEIGHTS_DOCSTRING.format(cls=cls_name)
cls.sum.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="sum", on_zero="0"
)
cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="mean", on_zero="NaN"
)
cls.sum_of_squares.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="sum_of_squares", on_zero="0"
)
cls.var.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="var", on_zero="NaN"
)
cls.std.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format(
cls=cls_name, fcn="std", on_zero="NaN"
)
|
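Row 51,463 fixes a copy-paste slip where every formatted docstring was assigned to cls.mean. Driving the assignments from a table makes that mistake harder to repeat; the helper, the Weighted class, and the template string below are placeholders, not xarray code.
def inject_docstrings(cls, cls_name, template):
    # Each docstring can only land on the method whose name it was built for.
    table = [("sum", "0"), ("mean", "NaN"), ("sum_of_squares", "0"),
             ("var", "NaN"), ("std", "NaN")]
    for fcn, on_zero in table:
        getattr(cls, fcn).__doc__ = template.format(cls=cls_name, fcn=fcn, on_zero=on_zero)

class Weighted:
    def sum(self): ...
    def mean(self): ...
    def sum_of_squares(self): ...
    def var(self): ...
    def std(self): ...

inject_docstrings(Weighted, "Weighted", "{cls}.{fcn} returns {on_zero} when all weights are zero")
assert Weighted.var.__doc__ == "Weighted.var returns NaN when all weights are zero"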
8,519 |
def not_empty(key, data, errors, context):
value = data.get(key)
valid_values = [False, 0, 0.0]
if (not value and value not in valid_values) or value is missing:
errors[key].append(_('Missing value'))
raise StopOnError
|
def not_empty(key, data, errors, context):
value = data.get(key)
valid_values = [False, 0]
if (not value and value not in valid_values) or value is missing:
errors[key].append(_('Missing value'))
raise StopOnError
|
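Row 8,519 drops 0.0 from valid_values. Because membership tests use equality and 0.0 == 0 == False in Python, the entry was redundant rather than wrong:
assert 0.0 in [False, 0]       # membership is equality-based: 0.0 == 0 == False
assert 0 in [False]            # even False alone matches 0
assert "" not in [False, 0]    # the empty string is still treated as missing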
25,977 |
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
edge_zone_type = CLIArgumentType(
help='The name of edge zone.',
min_api='2020-12-01',
is_preview=True
)
t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
shared_to_type = CLIArgumentType(
arg_type=get_enum_type(t_shared_to),
help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
'If not specified, list by subscription id.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.')
c.argument('edge_zone', edge_zone_type)
c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
c.argument('edge_zone', edge_zone_type)
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all different image sources under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
c.argument('edge_zone', edge_zone_type, )
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM.')
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP and NIC; the VNet and NSG are shared. It is recommended that there be no existing public IP, NIC, VNet or NSG in the resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01',
help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot. It is part of trusted launch.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM. It is part of trusted launch.')
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
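# Illustrative sketch only (not part of the registrations): the flags defined above map to a
# `vm create` invocation such as the following; resource names and the image are placeholders.
#   az vm create -g MyResourceGroup -n MyVm --image <urn-or-alias> --count 3 \
#       --security-type TrustedLaunch --enable-secure-boot true --enable-vtpm true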
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
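# Illustrative example of how these arguments surface on the CLI (resource names are placeholders):
#   az vm open-port -g MyResourceGroup -n MyVm --port 80,443,8080-8090 --priority 900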
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run slower.')
for scope in ['vm show', 'vmss show']:
with self.argument_context(scope) as c:
c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01')
for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']:
with self.argument_context(scope) as c:
c.ignore('include_user_data')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm install-patches') as c:
c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)')
c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.')
c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.')
c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.')
c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only')
c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only')
c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only")
c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
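# Illustrative example; the --reboot-setting and classification values shown are assumptions,
# since RebootSetting and the classification enums are defined elsewhere in this module:
#   az vm install-patches -g MyResourceGroup -n MyVm --maximum-duration PT2H \
#       --reboot-setting IfRequired --classifications-to-include-win Critical Security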
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypt-format data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
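# Illustrative example of the AAD argument group above; --volume-type and
# --disk-encryption-keyvault are registered in the shared encryption block later in this module,
# and all values are placeholders:
#   az vm encryption enable -g MyResourceGroup -n MyVm --disk-encryption-keyvault MyVault \
#       --volume-type ALL --aad-client-id <app-id> --aad-client-secret <secret>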
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/en-us/pricing/details/virtual-machines/dedicated-host/")
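# Illustrative example of creating a dedicated host (the SKU value is a placeholder):
#   az vm host create -g MyResourceGroup --host-group MyHostGroup -n MyHost \
#       --sku DSv3-Type1 --platform-fault-domain 0 --auto-replace true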
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts that are chosen by Azure, under the dedicated host group. Defaults to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('network_api_version', is_preview=True, min_api='2021-03-01',
help="Specify the Microsoft.Network API version used when creating networking resources in the Network "
"Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible "
"value is 2020-11-01.")
c.argument('spot_restore_enabled', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature, where evicted VMSS Spot instances are opportunistically restored based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
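# Illustrative sketch of a `vmss create` call exercising some of the flags above
# (the image and size values are placeholders):
#   az vmss create -g MyResourceGroup -n MyScaleSet --image <urn-or-alias> \
#       --instance-count 3 --vm-sku Standard_DS1_v2 --orchestration-mode Flexible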
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="Domain name of VM instances. Once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('spot_restore_enabled', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Enable the Spot-Try-Restore feature, where evicted VMSS Spot instances are opportunistically restored based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01',
help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
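# Illustrative example of enabling automatic repairs via `vmss update`; a health probe or
# Application Health extension is assumed to already be configured on the scale set:
#   az vmss update -g MyResourceGroup -n MyScaleSet \
#       --enable-automatic-repairs true --automatic-repairs-grace-period 30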
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) for which a notification is sent to the VM on the Instance Metadata Service before the VM is deleted')
c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%')
c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%')
c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%')
c.argument('pause_time_between_batches', min_api='2020-12-01',
help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds')
c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Setting this Boolean property allows VMSS to ignore AZ boundaries when constructing upgrade batches, and to consider only Update Domain and maxBatchInstancePercent to determine the batch size')
c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Setting this Boolean property causes all unhealthy instances in a scale set to be upgraded before any healthy instances')
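# Illustrative example of tuning the rolling upgrade policy with the shared arguments above
# (the ISO 8601 duration shown for --pause-time-between-batches is an assumption):
#   az vmss update -g MyResourceGroup -n MyScaleSet --max-batch-instance-percent 10 \
#       --max-unhealthy-instance-percent 20 --pause-time-between-batches PT30S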
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
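# Illustrative example of the shared run-command arguments above (the command id value
# is an assumption and the script is a placeholder):
#   az vm run-command invoke -g MyResourceGroup -n MyVm --command-id RunShellScript \
#       --scripts "echo hello" --parameters arg1=value1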
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="Accept system- or user-assigned identities separated by spaces. Use '[system]' to refer to the system-assigned identity, or a resource ID to refer to a user-assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
c.argument('edge_zone', edge_zone_type)
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<value1> <lun>=<value2>` to configure individual disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01',
help='Specify the behavior of the managed disk when the VM is deleted, i.e. whether the managed disk is deleted or detached.')
c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', '--data-delete-option'],
nargs='+', min_api='2021-03-01',
help='Specify whether data disk should be deleted or detached upon VM deletion.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'',
default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
c.argument('nic_delete_option', nargs='+', min_api='2021-03-01',
help='Specify what happens to the network interface when the VM is deleted. Use a singular '
'value to apply on all resources, or use <Name>=<Value> to configure '
'the delete behavior for individual resources. Possible options are Delete and Detach.')
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
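# Illustrative sketch combining a few of the shared Storage/Network arguments above
# (all names and the image are placeholders):
#   az vm create -g MyResourceGroup -n MyVm --image <urn-or-alias> \
#       --storage-sku os=Premium_LRS 0=StandardSSD_LRS --data-disk-sizes-gb 64 128 \
#       --vnet-name MyVnet --subnet MySubnet --public-ip-address ""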
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
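# Illustrative example (email and webhook values are placeholders):
#   az vm auto-shutdown -g MyResourceGroup -n MyVm --time 1730 --email admin@contoso.com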
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='Version of the diagnostics extension. The latest version is used if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='Force the update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='Force the update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
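# Illustrative example of setting an extension with a provisioning order; the extension and
# publisher names are placeholders, not a recommendation of a specific extension:
#   az vmss extension set -g MyResourceGroup --vmss-name MyScaleSet -n CustomScript \
#       --publisher Microsoft.Azure.Extensions --settings '{"commandToExecute": "echo hello"}' \
#       --provision-after-extensions MyOtherExtension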
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. The 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure Spot VMs and/or VMSS. Defaults to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. The 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure Spot VMs and/or VMSS. Defaults to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('shared_to', shared_to_type)
with self.argument_context('sig show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
for scope in ['sig share add', 'sig share remove']:
with self.argument_context(scope) as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
with self.argument_context('sig share add') as c:
c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share remove') as c:
c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share reset') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
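# Illustrative example of the gallery sharing commands above (the subscription id is a placeholder):
#   az sig share add -g MyResourceGroup --gallery-name MyGallery \
#       --subscription-ids 00000000-0000-0000-0000-000000000000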
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
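# Illustrative example of defining a gallery image (publisher/offer/sku values are placeholders):
#   az sig image-definition create -g MyResourceGroup -r MyGallery -i MyImageDefinition \
#       -p MyPublisher -f MyOffer -s MySku --os-type Linux --os-state Generalized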
with self.argument_context('sig image-definition list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-definition show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
with self.argument_context('sig update') as c:
c.ignore('gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name (if in the same resource group) or resource ID')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed without specifying a version will not use this image version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
c.argument('os_vhd_uri', help='Source VHD URI of OS disk')
c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk')
c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks')
c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks')
c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks')
with self.argument_context('sig image-version list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
with self.argument_context('sig image-version show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the image version is to be retrieved.',
id_part='child_name_2')
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The '
'name of the gallery image version to be retrieved. Needs to follow semantic version name pattern: '
'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. '
'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
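# Illustrative --target-regions values based on the format documented above (region names,
# counts and storage account types are placeholders): "westus=2=standard_lrs" sets 2 replicas
# with Standard_LRS in westus, while "eastus" keeps the default replica count and storage type.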
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
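# Illustrative usage (a sketch; resource names are placeholders):
#   az ppg create -g MyResourceGroup -n MyPpg -t Standard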
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
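# Illustrative usage (a sketch; the VM name, resource group and metric name are placeholders):
#   az vm monitor metrics tail --name MyVm --resource-group MyResourceGroup --metrics "Percentage CPU"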
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01',
options_list=['--enable-auto-key-rotation', '--auto-rotation'],
help='Enable automatic rotation of keys.')
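# Illustrative usage (a sketch; the key URL and vault name are placeholders):
#   az disk-encryption-set create -g MyResourceGroup -n MyDiskEncryptionSet --key-url <key-url> --source-vault MyVault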
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
|
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
edge_zone_type = CLIArgumentType(
help='The name of edge zone.',
min_api='2020-12-01',
is_preview=True
)
t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
shared_to_type = CLIArgumentType(
arg_type=get_enum_type(t_shared_to),
help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
'If not specified, list by subscription id.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared gallery image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.')
c.argument('edge_zone', edge_zone_type)
c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
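# Illustrative usage (a sketch; names and sizes are placeholders):
#   az disk create -g MyResourceGroup -n MyDisk --size-gb 128 --sku Premium_LRS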
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
c.argument('edge_zone', edge_zone_type)
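# Illustrative usage (a sketch; assumes MyDisk is an existing managed disk):
#   az snapshot create -g MyResourceGroup -n MySnapshot --source MyDisk --incremental true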
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all different image sources under 2 common arguments: --os-disk-source and --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
c.argument('edge_zone', edge_zone_type, )
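# Illustrative usage (a sketch; assumes MyVm is a generalized VM in the same resource group):
#   az image create -g MyResourceGroup -n MyImage --source MyVm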
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
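# Illustrative usage (a sketch; template and image names are placeholders):
#   az image builder output add -g MyResourceGroup -n MyTemplate --managed-image MyImage --managed-image-location westus2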
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
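# Illustrative usage (a sketch; the --type value is assumed to be a valid ScriptType member and
# the script URL is a placeholder):
#   az image builder customizer add -g MyResourceGroup -n MyTemplate --customizer-name InstallDeps --type shell --script-url <script-url>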
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' to update the OS disk and two data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM.')
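# Illustrative usage following the --write-accelerator/--disk-caching formats above (a sketch;
# names and LUNs are placeholders):
#   az vm update -g MyResourceGroup -n MyVm --write-accelerator os=true 1=true --disk-caching os=ReadWrite 1=None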
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01',
help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot. It is part of trusted launch.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM. It is part of trusted launch.')
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
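# Illustrative usage (a sketch; names, port range and priority are placeholders):
#   az vm open-port -g MyResourceGroup -n MyVm --port 80-100 --priority 900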
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='Show public IP address, FQDN, and power states. The command will run slower with this option.')
for scope in ['vm show', 'vmss show']:
with self.argument_context(scope) as c:
c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01')
for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']:
with self.argument_context(scope) as c:
c.ignore('include_user_data')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm install-patches') as c:
c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)')
c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.')
c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.')
c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.')
c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only')
c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only')
c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only")
c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
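# Illustrative usage (a sketch; the --reboot-setting value is assumed to be a valid
# VMGuestPatchRebootSetting member):
#   az vm install-patches -g MyResourceGroup -n MyVm --maximum-duration PT4H --reboot-setting IfRequired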
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. For example, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
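# Illustrative usage (a sketch; filter values are placeholders):
#   az vm list-skus --size Standard_D --resource-type disks --all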
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/en-us/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']), is_preview=True)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('network_api_version', is_preview=True, min_api='2021-03-01',
help="Specify the Microsoft.Network API version used when creating networking resources in the Network "
"Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible "
"value is 2020-11-01.")
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances; once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('spot_restore_enabled', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Enables the Spot-Try-Restore feature, where the platform opportunistically attempts to restore evicted VMSS SPOT instances based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01',
help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) that a notification is sent to the VM on the instance metadata server before the VM gets deleted')
c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%')
c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%')
c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%')
c.argument('pause_time_between_batches', min_api='2020-12-01',
help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds')
c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Setting this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size')
c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Setting this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
c.argument('edge_zone', edge_zone_type)
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying a URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01',
help='Specify the behavior of the managed disk when the VM gets deleted, i.e. whether the managed disk is deleted or detached.')
c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', '--data-delete-option'],
nargs='+', min_api='2021-03-01',
help='Specify whether data disk should be deleted or detached upon VM deletion.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'',
default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
c.argument('nic_delete_option', nargs='+', min_api='2021-03-01',
help='Specify what happens to the network interface when the VM is deleted. Use a singular '
'value to apply on all resources, or use <Name>=<Value> to configure '
'the delete behavior for individual resources. Possible options are Delete and Detach.')
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('shared_to', shared_to_type)
with self.argument_context('sig show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
for scope in ['sig share add', 'sig share remove']:
with self.argument_context(scope) as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
with self.argument_context('sig share add') as c:
c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share remove') as c:
c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share reset') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
with self.argument_context('sig image-definition list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-definition show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
with self.argument_context('sig update') as c:
c.ignore('gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name (if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
c.argument('os_vhd_uri', help='Source VHD URI of OS disk')
c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk')
c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks')
c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks')
c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks')
with self.argument_context('sig image-version list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
with self.argument_context('sig image-version show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The '
'name of the gallery image version to be created. Needs to follow semantic version name pattern: '
'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. '
'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01',
options_list=['--enable-auto-key-rotation', '--auto-rotation'],
help='Enable automatic rotation of keys.')
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endregion
|
346 |
def _check_install_compatibilitites():
try:
import theano
print(
"!" * 60
+ f"\nYour Python environment has Theano(-PyMC) {theano.__version__} installed, "
+ f"but you are importing PyMC {__version__} which uses Aesara as its backend."
+ f"\nFor PyMC {__version__} to work as expected you should uninstall Theano(-PyMC)."
+ "\nSee https://github.com/pymc-devs/pymc3/wiki for installation instructions.\n"
+ "!" * 60
)
except ImportError:
pass
try:
import pymc3
print(
"!" * 60
+ f"\nYou are importing PyMC {__version__}, but your environment also has"
+ f" the legacy version PyMC3 {pymc3.__version__} installed."
+ f"\nFor PyMC {__version__} to work as expected you should uninstall PyMC3."
+ "\nSee https://github.com/pymc-devs/pymc3/wiki for installation instructions.\n"
+ "!" * 60
)
except ImportError:
pass
|
def _check_install_compatibilitites():
try:
import theano
print(
"!" * 60
+ f"\nYour Python environment has Theano(-PyMC) {theano.__version__} installed, "
+ f"but you are importing PyMC {__version__} which uses Aesara as its backend."
+ f"\nFor PyMC {__version__} to work as expected you should uninstall Theano(-PyMC)."
+ "\nSee https://github.com/pymc-devs/pymc3/wiki for installation instructions.\n"
+ "!" * 60
)
except ImportError:
pass
try:
import pymc3
print(
"!" * 60
+ f"\nYou are importing PyMC {__version__}, but your environment also has"
+ f" the legacy version PyMC3 {pymc3.__version__} installed."
+ f"\nFor PyMC {__version__} to work as expected you should uninstall PyMC3."
+ "\nSee https://github.com/pymc-devs/pymc/wiki for update instructions.\n"
+ "!" * 60
)
except ImportError:
pass
|
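A note on how a guard like `_check_install_compatibilitites` is typically used: it is meant to run once at import time, after `__version__` is defined, so the warning banner appears as soon as the package is loaded. A minimal sketch follows; the version string and call site are assumptions for illustration, not taken from the snippet above.

# Hypothetical call site, e.g. near the bottom of the package's __init__.py.
__version__ = "4.0.0"  # placeholder; the real value comes from the package itself

_check_install_compatibilitites()  # prints the banner only if theano or pymc3 can be imported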
10,858 |
def get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False):
if reconnect:
disconnect(alias)
if alias not in _dbs:
conn = get_connection(alias)
conn_settings = _connection_settings[alias]
db = conn[conn_settings["name"]]
# Authenticate if necessary
if (
PYMONGO_VERSION < (4,)
and conn_settings["username"]
and (
conn_settings["password"]
or conn_settings["authentication_mechanism"] == "MONGODB-X509"
)
and ("authmechanismproperties" not in conn_settings.keys())
):
auth_kwargs = {"source": conn_settings["authentication_source"]}
if conn_settings["authentication_mechanism"] is not None:
auth_kwargs["mechanism"] = conn_settings["authentication_mechanism"]
db.authenticate(
conn_settings["username"], conn_settings["password"], **auth_kwargs
)
_dbs[alias] = db
return _dbs[alias]
|
def get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False):
if reconnect:
disconnect(alias)
if alias not in _dbs:
conn = get_connection(alias)
conn_settings = _connection_settings[alias]
db = conn[conn_settings["name"]]
# Authenticate if necessary
if (
PYMONGO_VERSION < (4,)
and conn_settings["username"]
and (
conn_settings["password"]
or conn_settings["authentication_mechanism"] == "MONGODB-X509"
)
and "authmechanismproperties" not in conn_settings
):
auth_kwargs = {"source": conn_settings["authentication_source"]}
if conn_settings["authentication_mechanism"] is not None:
auth_kwargs["mechanism"] = conn_settings["authentication_mechanism"]
db.authenticate(
conn_settings["username"], conn_settings["password"], **auth_kwargs
)
_dbs[alias] = db
return _dbs[alias]
|
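A short usage sketch for `get_db`; the database name and host URI below are placeholders, and `connect` is what registers the connection settings that `get_connection` and `get_db` read.

from mongoengine import connect
from mongoengine.connection import get_db

# Register a connection under the default alias, then fetch the underlying
# pymongo Database object for that alias.
connect(db="example_db", host="mongodb://localhost:27017")
db = get_db()        # defaults to DEFAULT_CONNECTION_NAME ("default")
print(db.name)       # -> "example_db"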
2,833 |
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""Compute the polynomial kernel between X and Y.
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
degree : int, default=3
Kernel degree.
gamma : float, default=None
Slope. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Intercept.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The polynomial kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
|
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""Compute the polynomial kernel between X and Y.
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
degree : int, default=3
Kernel degree.
gamma : float, default=None
Slope. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The polynomial kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
|
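A quick usage example for `polynomial_kernel`, with arbitrary illustrative values:

import numpy as np
from sklearn.metrics.pairwise import polynomial_kernel

X = np.array([[0.0, 1.0], [1.0, 1.0]])
K = polynomial_kernel(X, degree=3, gamma=0.5, coef0=1)
# Each entry is (gamma * <x_i, x_j> + coef0) ** degree, e.g.
# K[1, 1] == (0.5 * 2.0 + 1) ** 3 == 8.0
print(K)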
50,120 |
def get_interfaces(blacklist_drivers=None) -> list:
"""Return list of interface tuples (name, mac, driver, device_id)
Bridges and any devices that have a 'stolen' mac are excluded."""
ret = []
devs = get_devicelist()
# 16 somewhat arbitrarily chosen. Normally a mac is 6 '00:' tokens.
zero_mac = ':'.join(('00',) * 16)
for name in devs:
if not interface_has_own_mac(name):
continue
if is_bridge(name):
continue
if is_vlan(name):
continue
if is_bond(name):
continue
if get_master(name) is not None and not master_is_bridge_or_bond(name):
continue
if is_netfailover(name):
continue
mac = get_interface_mac(name)
# some devices may not have a mac (tun0)
if not mac:
continue
# skip nics that have no mac (00:00....)
if name != 'lo' and mac == zero_mac[:len(mac)]:
continue
# skip nics that have drivers blacklisted
driver = device_driver(name)
if blacklist_drivers is not None and driver in blacklist_drivers:
continue
ret.append((name, mac, driver, device_devid(name)))
return ret
|
def get_interfaces(blacklist_drivers=None) -> list:
"""Return list of interface tuples (name, mac, driver, device_id)
Bridges and any devices that have a 'stolen' mac are excluded."""
ret = []
devs = get_devicelist()
# 16 somewhat arbitrarily chosen. Normally a mac is 6 '00:' tokens.
zero_mac = ':'.join(('00',) * 16)
for name in devs:
if not interface_has_own_mac(name):
continue
if is_bridge(name):
continue
if is_vlan(name):
continue
if is_bond(name):
continue
if get_master(name) is not None and not master_is_bridge_or_bond(name):
continue
if is_netfailover(name):
continue
mac = get_interface_mac(name)
# some devices may not have a mac (tun0)
if not mac:
continue
# skip nics that have no mac (00:00....)
if name != 'lo' and mac == zero_mac[:len(mac)]:
continue
# skip nics that have drivers blacklisted
driver = device_driver(name)
if driver in blacklist_drivers:
continue
ret.append((name, mac, driver, device_devid(name)))
return ret
|
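One behavioural detail of the modified `get_interfaces`: dropping the `is not None` guard means `driver in blacklist_drivers` is evaluated unconditionally, which raises `TypeError` when `blacklist_drivers` is `None`. The change therefore presumably relies on callers (or the parameter default) supplying an iterable such as an empty list. A tiny illustration of that assumption:

# Hypothetical illustration; not part of the module above.
blacklist_drivers = []                 # assumed default instead of None
driver = "mlx4_core"                   # example driver name
skip = driver in blacklist_drivers     # valid for a list; `driver in None` raises TypeError
print(skip)                            # False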
55,024 |
def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (qml.Operation): A Pauli word.
pauli_2 (qml.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
(qml.Operation, np.complex): The product of pauli_1 and pauli_2, and the
global phase.
**Example**
This function works the same as ``pauli_mult`` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
.. code-block:: python
import pennylane as qml
from pennylane.grouping.pauli_group import pauli_mult
pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
will yield ``qml.PauliZ(0)`` and :math:`1j`.
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
if pauli_ordering in [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
|
def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (qml.Operation): A Pauli word.
pauli_2 (qml.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
(qml.Operation, np.complex): The product of pauli_1 and pauli_2, and the
global phase.
**Example**
This function works the same as ``pauli_mult`` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
>>> from pennylane.grouping.pauli_group import pauli_mult_with_phase
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
>>> product
PauliZ(wires=[0])
>>> phase
1j
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
if pauli_ordering in [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
|
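The phase table used in the loop above (XY, YZ, ZX pick up +i; the reversed orders pick up -i) can be sanity-checked with plain NumPy matrices, independent of PennyLane:

import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

assert np.allclose(X @ Y, 1j * Z)    # XY = iZ  -> phase +i
assert np.allclose(Y @ Z, 1j * X)    # YZ = iX  -> phase +i
assert np.allclose(Z @ X, 1j * Y)    # ZX = iY  -> phase +i
assert np.allclose(Y @ X, -1j * Z)   # YX = -iZ -> phase -i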
35,653 |
def _mobilenet_v3(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: bool,
progress: bool,
**kwargs: Any,
) -> MobileNetV3:
if weights is not None:
kwargs["num_classes"] = len(weights.meta["categories"])
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if weights is not None:
model.load_state_dict(weights.state_dict(progress=progress))
return model
|
def _mobilenet_v3(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: Optional[Weights],
progress: bool,
**kwargs: Any,
) -> MobileNetV3:
if weights is not None:
kwargs["num_classes"] = len(weights.meta["categories"])
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if weights is not None:
model.load_state_dict(weights.state_dict(progress=progress))
return model
|
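The change above annotates the builder to accept an optional weights object instead of a bool. A hedged sketch of that builder pattern in isolation (the PretrainedWeights container and build_model helper below are hypothetical, not the torchvision API):

from dataclasses import dataclass, field
from typing import Any, Callable, Dict, Optional

@dataclass
class PretrainedWeights:
    # Hypothetical stand-in for a weights entry: a checkpoint URL plus metadata.
    url: str
    meta: Dict[str, Any] = field(default_factory=dict)

def build_model(model_fn: Callable[..., Any], weights: Optional[PretrainedWeights], **kwargs: Any) -> Any:
    if weights is not None:
        # Derive construction options (e.g. num_classes) from the weight metadata.
        kwargs["num_classes"] = len(weights.meta["categories"])
    model = model_fn(**kwargs)
    if weights is not None:
        # A real builder would download the checkpoint and load the state dict here.
        pass
    return model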
5,495 |
def test_delete_user_no_revisions_but_attachment_revisions_donate(
db, user_client, wiki_user, django_user_model
):
"""
This test is based on the bug report
https://github.com/mdn/kuma/issues/6479
The user didn't have any revisions to confront the legacy of, but there might be
other things attached to the user.
"""
other_user = django_user_model.objects.create(
username="other", email="[email protected]"
)
assert not Revision.objects.filter(creator=wiki_user).update(creator=other_user)
attachment_revision = AttachmentRevision(
attachment=Attachment.objects.create(title="test attachment"),
file="some/path.ext",
mime_type="application/kuma",
creator=wiki_user,
title="test attachment",
)
attachment_revision.save()
url = reverse("users.user_delete", kwargs={"username": wiki_user.username})
response = user_client.post(url, HTTP_HOST=settings.WIKI_HOST)
# This means it didn't work! The form rejects.
assert response.status_code == 200
# Ok, let's donate the attachment revisions to "Anonymous"
response = user_client.post(
url, {"attributions": "donate"}, HTTP_HOST=settings.WIKI_HOST
)
# This means it didn't work! The form rejects.
assert response.status_code == 302
with pytest.raises(User.DoesNotExist):
wiki_user.refresh_from_db()
attachment_revision.refresh_from_db()
assert attachment_revision.creator.username == "Anonymous"
|
def test_delete_user_no_revisions_but_attachment_revisions_donate(
db, user_client, wiki_user, django_user_model
):
"""
This test is based on the bug report
https://github.com/mdn/kuma/issues/6479
The user didn't have any revisions to confront the legacy of, but there might be
other things attached to the user.
"""
other_user = django_user_model.objects.create(
username="other", email="[email protected]"
)
assert not Revision.objects.filter(creator=wiki_user).update(creator=other_user)
attachment_revision = AttachmentRevision(
attachment=Attachment.objects.create(title="test attachment"),
file="some/path.ext",
mime_type="application/kuma",
creator=wiki_user,
title="test attachment",
)
attachment_revision.save()
url = reverse("users.user_delete", kwargs={"username": wiki_user.username})
response = user_client.post(url, HTTP_HOST=settings.WIKI_HOST)
# This means it didn't work! The form rejects.
assert response.status_code == 200
# Ok, let's donate the attachment revisions to "Anonymous"
response = user_client.post(
url, {"attributions": "donate"}, HTTP_HOST=settings.WIKI_HOST
)
# This means it worked! The user's attributions have been donated to the Anonymous user.
assert response.status_code == 302
with pytest.raises(User.DoesNotExist):
wiki_user.refresh_from_db()
attachment_revision.refresh_from_db()
assert attachment_revision.creator.username == "Anonymous"
|
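The assertion pattern at the end of the test (the user lookup raising once the row is gone) can be restated as a small reusable helper; a sketch assuming a standard Django user model, with an illustrative helper name:

import pytest

def assert_user_deleted(django_user_model, username):
    # objects.get raises <Model>.DoesNotExist once the row has been removed.
    with pytest.raises(django_user_model.DoesNotExist):
        django_user_model.objects.get(username=username)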
1,074 |
def compute_noise_components(imgseries, mask_images, num_components,
filter_type, degree, period_cut, repetition_time):
"""Compute the noise components from the imgseries for each mask
imgseries: a nibabel img
mask_images: a list of nibabel images
num_components: number of noise components to return
    filter_type: type of filter to apply to time series before computing
noise components.
'polynomial' - Legendre polynomial basis
'cosine' - Discrete cosine (DCT) basis
False - None (mean-removal only)
Filter options:
degree: order of polynomial used to remove trends from the timeseries
period_cut: minimum period (in sec) for DCT high-pass filter
repetition_time: time (in sec) between volume acquisitions
returns:
components: a numpy array
basis: a numpy array containing the (non-constant) filter regressors
"""
components = None
basis = np.array([])
for img in mask_images:
mask = img.get_data().astype(np.bool)
if imgseries.shape[:3] != mask.shape:
raise ValueError(
'Inputs for CompCor, timeseries and mask, do not have '
'matching spatial dimensions ({} and {}, respectively)'.format(
imgseries.shape[:3], mask.shape))
voxel_timecourses = imgseries[mask, :]
# Zero-out any bad values
voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
# Currently support Legendre-polynomial or cosine or detrending
# With no filter, the mean is nonetheless removed (poly w/ degree 0)
if filter_type == 'cosine':
voxel_timecourses, basis = cosine_filter(
voxel_timecourses, repetition_time, period_cut)
elif filter_type in ('polynomial', False):
# from paper:
# "The constant and linear trends of the columns in the matrix M were
# removed [prior to ...]"
voxel_timecourses, basis = regress_poly(degree, voxel_timecourses)
# "Voxel time series from the noise ROI (either anatomical or tSTD) were
# placed in a matrix M of size Nxm, with time along the row dimension
# and voxels along the column dimension."
M = voxel_timecourses.T
# "[... were removed] prior to column-wise variance normalization."
M = M / _compute_tSTD(M, 1.)
# "The covariance matrix C = MMT was constructed and decomposed into its
# principal components using a singular value decomposition."
try:
u, _, _ = np.linalg.svd(M, full_matrices=False)
except np.linalg.LinAlgError:
try:
u, _, _ = linalg.svd(M, full_matrices=False, lapack_driver='gesvd')
except linalg.LinAlgError:
if self.inputs.failure_mode == 'error':
raise
u = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan
if components is None:
components = u[:, :num_components]
else:
components = np.hstack((components, u[:, :num_components]))
if components is None and num_components > 0:
if self.inputs.failure_mode == 'error':
raise ValueError('No components found')
components = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan
return components, basis
|
def compute_noise_components(imgseries, mask_images, num_components,
filter_type, degree, period_cut, repetition_time):
"""Compute the noise components from the imgseries for each mask
imgseries: a nibabel img
mask_images: a list of nibabel images
num_components: number of noise components to return
    filter_type: type of filter to apply to time series before computing
noise components.
'polynomial' - Legendre polynomial basis
'cosine' - Discrete cosine (DCT) basis
False - None (mean-removal only)
Filter options:
degree: order of polynomial used to remove trends from the timeseries
period_cut: minimum period (in sec) for DCT high-pass filter
repetition_time: time (in sec) between volume acquisitions
returns:
components: a numpy array
basis: a numpy array containing the (non-constant) filter regressors
"""
components = None
basis = np.array([])
for img in mask_images:
mask = img.get_data().astype(np.bool)
if imgseries.shape[:3] != mask.shape:
raise ValueError(
'Inputs for CompCor, timeseries and mask, do not have '
'matching spatial dimensions ({} and {}, respectively)'.format(
imgseries.shape[:3], mask.shape))
voxel_timecourses = imgseries[mask, :]
# Zero-out any bad values
voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
# Currently support Legendre-polynomial or cosine or detrending
# With no filter, the mean is nonetheless removed (poly w/ degree 0)
if filter_type == 'cosine':
voxel_timecourses, basis = cosine_filter(
voxel_timecourses, repetition_time, period_cut)
elif filter_type in ('polynomial', False):
# from paper:
# "The constant and linear trends of the columns in the matrix M were
# removed [prior to ...]"
voxel_timecourses, basis = regress_poly(degree, voxel_timecourses)
# "Voxel time series from the noise ROI (either anatomical or tSTD) were
# placed in a matrix M of size Nxm, with time along the row dimension
# and voxels along the column dimension."
M = voxel_timecourses.T
# "[... were removed] prior to column-wise variance normalization."
M = M / _compute_tSTD(M, 1.)
# "The covariance matrix C = MMT was constructed and decomposed into its
# principal components using a singular value decomposition."
try:
u, _, _ = fallback_svd(M, full_matrices=False)
except np.linalg.LinAlgError:
try:
u, _, _ = linalg.svd(M, full_matrices=False, lapack_driver='gesvd')
except linalg.LinAlgError:
if self.inputs.failure_mode == 'error':
raise
u = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan
if components is None:
components = u[:, :num_components]
else:
components = np.hstack((components, u[:, :num_components]))
if components is None and num_components > 0:
if self.inputs.failure_mode == 'error':
raise ValueError('No components found')
components = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan
return components, basis
|
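The core computation described in the docstring (mean removal, column-wise variance normalisation, then an SVD whose leading left singular vectors become the noise regressors) fits in a few lines of plain numpy; a standalone sketch, not the nipype implementation:

import numpy as np

def compcor_components(M, num_components):
    """M is (time x voxels): time along rows, voxels along columns."""
    M = M - M.mean(axis=0)                        # remove the mean trend
    std = M.std(axis=0, ddof=1)
    std[std == 0] = 1.0                           # guard against constant voxels
    M = M / std                                   # column-wise variance normalisation
    u, _, _ = np.linalg.svd(M, full_matrices=False)
    return u[:, :num_components]                  # (time x k) noise regressors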
32,508 |
def hold_message_summary_command():
"""
Getting counts of currently held messages for each hold reason.
Args:
args: input arguments for the command.
"""
response = http_request('POST', api_endpoint='/api/gateway/get-hold-summary-list', payload={'data': []})
if response.get('fail'):
raise Exception(json.dumps(response.get('fail')[0].get('errors')))
summery_list = response.get('data')
headers = {'policyInfo': 'Held Reason',
'numberOfItems': 'Number Of Items'
}
readable_output = tableToMarkdown('Message Summery', t=summery_list,
headerTransform=lambda header: headers.get(header),
removeNull=True)
return CommandResults(
outputs_prefix='Mimecast.HoldMessageSummary',
outputs_key_field='policyInfo',
readable_output=readable_output,
outputs=summery_list,
raw_response=response
)
|
def hold_message_summary_command():
"""
Getting counts of currently held messages for each hold reason.
Args:
args: input arguments for the command.
"""
response = http_request('POST', api_endpoint='/api/gateway/get-hold-summary-list', payload={'data': []})
if response.get('fail'):
raise Exception(json.dumps(response.get('fail')[0].get('errors')))
summery_list = response.get('data')
headers = {'policyInfo': 'Held Reason',
'numberOfItems': 'Number Of Items'
}
readable_output = tableToMarkdown('Message Summary', t=summery_list,
headerTransform=lambda header: headers.get(header),
removeNull=True)
return CommandResults(
outputs_prefix='Mimecast.HoldMessageSummary',
outputs_key_field='policyInfo',
readable_output=readable_output,
outputs=summery_list,
raw_response=response
)
|
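The readable output above relies on a header-name mapping passed to tableToMarkdown; the same idea with a fallback so unknown API fields keep their raw names (summary_rows is a hypothetical list of dicts, and the fallback is an assumption rather than part of the original command):

headers = {'policyInfo': 'Held Reason', 'numberOfItems': 'Number Of Items'}
readable_output = tableToMarkdown(
    'Message Summary',
    t=summary_rows,                                               # hypothetical list of dicts from the API
    headerTransform=lambda header: headers.get(header, header),   # fall back to the raw field name
    removeNull=True,
)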
14,302 |
def maxCtxSubtable(maxCtx, tag, lookupType, st):
"""Calculate usMaxContext based on a single lookup table (and an existing
max value).
"""
# single positioning, single / multiple substitution
if (tag == 'GPOS' and lookupType == 1) or (
tag == 'GSUB' and lookupType in (1, 2, 3)):
maxCtx = max(maxCtx, 1)
# pair positioning
elif tag == 'GPOS' and lookupType == 2:
maxCtx = max(maxCtx, 2)
# ligatures
elif tag == 'GSUB' and lookupType == 4:
for ligatures in st.ligatures.values():
for ligature in ligatures:
maxCtx = max(maxCtx, getattr(ligature, 'CompCount', 0))
# context
elif (tag == 'GPOS' and lookupType == 7) or (
tag == 'GSUB' and lookupType == 5):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub')
# chained context
elif (tag == 'GPOS' and lookupType == 8) or (
tag == 'GSUB' and lookupType == 6):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub', 'Chain')
# extensions
elif (tag == 'GPOS' and lookupType == 9) or (
tag == 'GSUB' and lookupType == 7):
maxCtx = maxCtxSubtable(
maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
# reverse-chained context
elif tag == 'GSUB' and lookupType == 8:
maxCtx = maxCtxContextualRule(maxCtx, st, 'Reverse')
return maxCtx
|
def maxCtxSubtable(maxCtx, tag, lookupType, st):
"""Calculate usMaxContext based on a single lookup table (and an existing
max value).
"""
# single positioning, single / multiple substitution
if (tag == 'GPOS' and lookupType == 1) or (
tag == 'GSUB' and lookupType in (1, 2, 3)):
maxCtx = max(maxCtx, 1)
# pair positioning
elif tag == 'GPOS' and lookupType == 2:
maxCtx = max(maxCtx, 2)
# ligatures
elif tag == 'GSUB' and lookupType == 4:
for ligatures in st.ligatures.values():
for ligature in ligatures:
maxCtx = max(maxCtx, len(ligature.Component))
# context
elif (tag == 'GPOS' and lookupType == 7) or (
tag == 'GSUB' and lookupType == 5):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub')
# chained context
elif (tag == 'GPOS' and lookupType == 8) or (
tag == 'GSUB' and lookupType == 6):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub', 'Chain')
# extensions
elif (tag == 'GPOS' and lookupType == 9) or (
tag == 'GSUB' and lookupType == 7):
maxCtx = maxCtxSubtable(
maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
# reverse-chained context
elif tag == 'GSUB' and lookupType == 8:
maxCtx = maxCtxContextualRule(maxCtx, st, 'Reverse')
return maxCtx
|
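A sketch of how the subtable routine above is typically driven over a whole GSUB or GPOS table (the LookupList/SubTable attribute paths follow fontTools' otTables layout, but treat the exact paths as an assumption):

def max_context_for_table(font, tag, start=0):
    max_ctx = start
    table = font[tag].table
    if table.LookupList is None:
        return max_ctx
    for lookup in table.LookupList.Lookup:
        for subtable in lookup.SubTable:
            max_ctx = maxCtxSubtable(max_ctx, tag, lookup.LookupType, subtable)
    return max_ctx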
44,157 |
def find_and_place_cuts(
graph: MultiDiGraph,
cut_method: Callable = kahypar_cut,
cut_strategy: CutStrategy = None,
replace_wire_cuts=False,
**kwargs,
) -> MultiDiGraph:
"""Automatically finds and places optimal :class:`~.WireCut` nodes into a given tape-converted graph
using a customizable graph partitioning function. Preserves existing placed cuts.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
cut_method (Callable): A graph partitioning function that takes an input graph and returns
a list of edges to be cut based on a given set of constraints and objective. Defaults
to :func:`kahypar_cut` which requires KaHyPar to be installed using
``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from
source for Windows users.
cut_strategy (CutStrategy): Strategy for optimizing cutting parameters based on device
constraints. Defaults to ``None`` in which case ``kwargs`` must be fully specified
for passing to the ``cut_method``.
replace_wire_cuts (bool): Whether to replace :class:`~.WireCut` nodes with
:class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. Defaults to ``False``.
kwargs: Additional keyword arguments to be passed to the callable ``cut_method``.
Returns:
nx.MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 4-wire circuit with a single CNOT gate connecting the top (wires
``[0, 1]``) and bottom (wires ``["a", "b"]``) halves of the circuit. Note there's a
:class:`~.WireCut` manually placed into the circuit already.
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.1, wires=0)
qml.RY(0.2, wires=1)
qml.RX(0.3, wires="a")
qml.RY(0.4, wires="b")
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=["a", "b"])
qml.CNOT(wires=[1, "a"])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=["a", "b"])
qml.RX(0.5, wires="a")
qml.RY(0.6, wires="b")
qml.expval(qml.PauliX(wires=[0]) @ qml.PauliY(wires=["a"]) @ qml.PauliZ(wires=["b"]))
>>> print(tape.draw())
0: ──RX(0.1)──╭C──────────╭C───────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C──╰X───────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C──RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X──RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
Since the existing :class:`~.WireCut` doesn't sufficiently fragment the circuit, we can find the
remaining cuts using the default KaHyPar partitioner:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
)
Visualizing the newly-placed cut:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.1)──╭C───────────────╭C────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C───//──╰X────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C────RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X────RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
We can then proceed with the usual process of replacing :class:`~.WireCut` nodes with
pairs of :class:`~.MeasureNode` and :class:`~.PrepareNode`, and then break the graph
into fragments. Or, alternatively, we can directly get such processed graph by passing
``replace_wire_cuts=True``:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
replace_wire_cuts=True,
)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX(0.1)──────╭C───────────────╭C──┤ ⟨X⟩
1: ──RY(0.2)──────╰X──MeasureNode──│───┤
2: ──PrepareNode───────────────────╰X──┤
a: ──RX(0.3)──────╭C──╭X──╭C────────────RX(0.5)──╭┤ ⟨Y ⊗ Z⟩
b: ──RY(0.4)──────╰X──│───╰X────────────RY(0.6)──╰┤ ⟨Y ⊗ Z⟩
1: ──PrepareNode──────╰C───MeasureNode────────────┤
Alternatively, if all we want to do is to find the optimal way to fit a circuit onto a smaller
device, a :class:`~.CutStrategy` can be used to populate the necessary explorations of cutting
parameters. As an extreme example, if the only device at our desposal is a 2-qubit device, a
    simple cut strategy is to simply specify the ``max_free_wires`` argument (or equivalently
directly passing a :class:`~.Device` to the ``device`` argument):
>>> cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2)
>>> print(cut_strategy.get_cut_kwargs(graph))
[{'num_fragments': 2, 'imbalance': 0.5714285714285714},
{'num_fragments': 3, 'imbalance': 1.4},
{'num_fragments': 4, 'imbalance': 1.75},
{'num_fragments': 5, 'imbalance': 2.3333333333333335},
{'num_fragments': 6, 'imbalance': 2.0},
{'num_fragments': 7, 'imbalance': 3.0},
{'num_fragments': 8, 'imbalance': 2.5},
{'num_fragments': 9, 'imbalance': 2.0},
{'num_fragments': 10, 'imbalance': 1.5},
{'num_fragments': 11, 'imbalance': 1.0},
{'num_fragments': 12, 'imbalance': 0.5},
{'num_fragments': 13, 'imbalance': 0.05},
{'num_fragments': 14, 'imbalance': 0.1}]
The printed list above shows all the possible cutting configurations one can attempt to perform
in order to search for the optimal cut. This is done by directly passing a
:class:`~.CutStrategy` to :func:`~.find_and_place_cuts`:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
cut_strategy=cut_strategy,
)
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX──//─╭C──//────────╭C──//─────────┤ ╭<X@Y@Z>
1: ──RY──//─╰X──//─╭C──//─╰X─────────────┤ │
a: ──RX──//─╭C──//─╰X──//─╭C──//──RX──//─┤ ├<X@Y@Z>
b: ──RY──//─╰X──//────────╰X──//──RY─────┤ ╰<X@Y@Z>
As one can tell, quite a few cuts have to be made in order to execute the circuit on solely
2-qubit devices. To verify, let's print the fragments:
>>> qml.transforms.qcut.replace_wire_cut_nodes(cut_graph)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX──MeasureNode─┤
1: ──RY──MeasureNode─┤
a: ──RX──MeasureNode─┤
b: ──RY──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──MeasureNode─┤
a: ──PrepareNode─╭C──MeasureNode─┤
b: ──PrepareNode─╰X──MeasureNode─┤
1: ──PrepareNode─╭C──MeasureNode─┤
a: ──PrepareNode─╰X──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──────────────┤
b: ──PrepareNode─╭X──MeasureNode─┤
a: ──PrepareNode─╰C──MeasureNode─┤
a: ──PrepareNode──RX──MeasureNode─┤
b: ──PrepareNode──RY─┤ <Z>
0: ──PrepareNode─┤ <X>
a: ──PrepareNode─┤ <Y>
"""
cut_graph = _remove_existing_cuts(graph)
if isinstance(cut_strategy, CutStrategy):
cut_kwargs_probed = cut_strategy.get_cut_kwargs(cut_graph)
# Need to reseed if a seed is passed:
seed = kwargs.pop("seed", None)
seeds = np.random.default_rng(seed).choice(2**15, cut_strategy.trials_per_probe).tolist()
cut_edges_probed = {
(cut_kwargs["num_fragments"], trial_id): cut_method(
cut_graph,
**{
**cut_kwargs,
**kwargs,
"seed": seed,
}, # kwargs has higher precedence for colliding keys
)
for cut_kwargs in cut_kwargs_probed
for trial_id, seed in zip(range(cut_strategy.trials_per_probe), seeds)
}
valid_cut_edges = {}
for (k, _), cut_edges in cut_edges_probed.items():
# The easiest way to tell if a cut is valid is to just do the fragment graph.
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
num_cuts = sum(isinstance(n, WireCut) for n in cut_graph.nodes)
replace_wire_cut_nodes(cut_graph)
frags, _ = fragment_graph(cut_graph)
if _is_valid_cut(
fragments=frags,
num_cuts=num_cuts,
num_fragments=k,
cut_candidates=valid_cut_edges,
max_free_wires=cut_strategy.max_free_wires,
):
valid_cut_edges[k] = cut_edges
if len(valid_cut_edges) < 1:
raise ValueError(
"Unable to find a circuit cutting that satisfies all constraints. "
"Are the constraints too strict?"
)
cut_edges = _get_optim_cut(valid_cut_edges)
else:
cut_edges = cut_method(cut_graph, **kwargs)
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
if replace_wire_cuts:
replace_wire_cut_nodes(cut_graph)
return cut_graph
|
def find_and_place_cuts(
graph: MultiDiGraph,
cut_method: Callable = kahypar_cut,
cut_strategy: CutStrategy = None,
replace_wire_cuts=False,
**kwargs,
) -> MultiDiGraph:
"""Automatically finds and places optimal :class:`~.WireCut` nodes into a given tape-converted graph
using a customizable graph partitioning function. Preserves existing placed cuts.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
cut_method (Callable): A graph partitioning function that takes an input graph and returns
a list of edges to be cut based on a given set of constraints and objective. Defaults
to :func:`kahypar_cut` which requires KaHyPar to be installed using
``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from
source for Windows users.
cut_strategy (CutStrategy): Strategy for optimizing cutting parameters based on device
constraints. Defaults to ``None`` in which case ``kwargs`` must be fully specified
for passing to the ``cut_method``.
replace_wire_cuts (bool): Whether to replace :class:`~.WireCut` nodes with
:class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. Defaults to ``False``.
kwargs: Additional keyword arguments to be passed to the callable ``cut_method``.
Returns:
nx.MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 4-wire circuit with a single CNOT gate connecting the top (wires
``[0, 1]``) and bottom (wires ``["a", "b"]``) halves of the circuit. Note there's a
:class:`~.WireCut` manually placed into the circuit already.
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.1, wires=0)
qml.RY(0.2, wires=1)
qml.RX(0.3, wires="a")
qml.RY(0.4, wires="b")
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=["a", "b"])
qml.CNOT(wires=[1, "a"])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=["a", "b"])
qml.RX(0.5, wires="a")
qml.RY(0.6, wires="b")
qml.expval(qml.PauliX(wires=[0]) @ qml.PauliY(wires=["a"]) @ qml.PauliZ(wires=["b"]))
>>> print(tape.draw())
0: ──RX(0.1)──╭C──────────╭C───────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C──╰X───────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C──RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X──RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
Since the existing :class:`~.WireCut` doesn't sufficiently fragment the circuit, we can find the
remaining cuts using the default KaHyPar partitioner:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
)
Visualizing the newly-placed cut:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.1)──╭C───────────────╭C────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C───//──╰X────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C────RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X────RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
We can then proceed with the usual process of replacing :class:`~.WireCut` nodes with
pairs of :class:`~.MeasureNode` and :class:`~.PrepareNode`, and then break the graph
into fragments. Or, alternatively, we can directly get such processed graph by passing
``replace_wire_cuts=True``:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
replace_wire_cuts=True,
)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX(0.1)──────╭C───────────────╭C──┤ ⟨X⟩
1: ──RY(0.2)──────╰X──MeasureNode──│───┤
2: ──PrepareNode───────────────────╰X──┤
a: ──RX(0.3)──────╭C──╭X──╭C────────────RX(0.5)──╭┤ ⟨Y ⊗ Z⟩
b: ──RY(0.4)──────╰X──│───╰X────────────RY(0.6)──╰┤ ⟨Y ⊗ Z⟩
1: ──PrepareNode──────╰C───MeasureNode────────────┤
Alternatively, if all we want to do is to find the optimal way to fit a circuit onto a smaller
device, a :class:`~.CutStrategy` can be used to populate the necessary explorations of cutting
parameters. As an extreme example, if the only device at our disposal is a 2-qubit device, a
    simple cut strategy is to simply specify the ``max_free_wires`` argument (or equivalently
directly passing a :class:`~.Device` to the ``device`` argument):
>>> cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2)
>>> print(cut_strategy.get_cut_kwargs(graph))
[{'num_fragments': 2, 'imbalance': 0.5714285714285714},
{'num_fragments': 3, 'imbalance': 1.4},
{'num_fragments': 4, 'imbalance': 1.75},
{'num_fragments': 5, 'imbalance': 2.3333333333333335},
{'num_fragments': 6, 'imbalance': 2.0},
{'num_fragments': 7, 'imbalance': 3.0},
{'num_fragments': 8, 'imbalance': 2.5},
{'num_fragments': 9, 'imbalance': 2.0},
{'num_fragments': 10, 'imbalance': 1.5},
{'num_fragments': 11, 'imbalance': 1.0},
{'num_fragments': 12, 'imbalance': 0.5},
{'num_fragments': 13, 'imbalance': 0.05},
{'num_fragments': 14, 'imbalance': 0.1}]
The printed list above shows all the possible cutting configurations one can attempt to perform
in order to search for the optimal cut. This is done by directly passing a
:class:`~.CutStrategy` to :func:`~.find_and_place_cuts`:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
cut_strategy=cut_strategy,
)
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX──//─╭C──//────────╭C──//─────────┤ ╭<X@Y@Z>
1: ──RY──//─╰X──//─╭C──//─╰X─────────────┤ │
a: ──RX──//─╭C──//─╰X──//─╭C──//──RX──//─┤ ├<X@Y@Z>
b: ──RY──//─╰X──//────────╰X──//──RY─────┤ ╰<X@Y@Z>
As one can tell, quite a few cuts have to be made in order to execute the circuit on solely
2-qubit devices. To verify, let's print the fragments:
>>> qml.transforms.qcut.replace_wire_cut_nodes(cut_graph)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX──MeasureNode─┤
1: ──RY──MeasureNode─┤
a: ──RX──MeasureNode─┤
b: ──RY──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──MeasureNode─┤
a: ──PrepareNode─╭C──MeasureNode─┤
b: ──PrepareNode─╰X──MeasureNode─┤
1: ──PrepareNode─╭C──MeasureNode─┤
a: ──PrepareNode─╰X──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──────────────┤
b: ──PrepareNode─╭X──MeasureNode─┤
a: ──PrepareNode─╰C──MeasureNode─┤
a: ──PrepareNode──RX──MeasureNode─┤
b: ──PrepareNode──RY─┤ <Z>
0: ──PrepareNode─┤ <X>
a: ──PrepareNode─┤ <Y>
"""
cut_graph = _remove_existing_cuts(graph)
if isinstance(cut_strategy, CutStrategy):
cut_kwargs_probed = cut_strategy.get_cut_kwargs(cut_graph)
# Need to reseed if a seed is passed:
seed = kwargs.pop("seed", None)
seeds = np.random.default_rng(seed).choice(2**15, cut_strategy.trials_per_probe).tolist()
cut_edges_probed = {
(cut_kwargs["num_fragments"], trial_id): cut_method(
cut_graph,
**{
**cut_kwargs,
**kwargs,
"seed": seed,
}, # kwargs has higher precedence for colliding keys
)
for cut_kwargs in cut_kwargs_probed
for trial_id, seed in zip(range(cut_strategy.trials_per_probe), seeds)
}
valid_cut_edges = {}
for (k, _), cut_edges in cut_edges_probed.items():
# The easiest way to tell if a cut is valid is to just do the fragment graph.
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
num_cuts = sum(isinstance(n, WireCut) for n in cut_graph.nodes)
replace_wire_cut_nodes(cut_graph)
frags, _ = fragment_graph(cut_graph)
if _is_valid_cut(
fragments=frags,
num_cuts=num_cuts,
num_fragments=k,
cut_candidates=valid_cut_edges,
max_free_wires=cut_strategy.max_free_wires,
):
valid_cut_edges[k] = cut_edges
if len(valid_cut_edges) < 1:
raise ValueError(
"Unable to find a circuit cutting that satisfies all constraints. "
"Are the constraints too strict?"
)
cut_edges = _get_optim_cut(valid_cut_edges)
else:
cut_edges = cut_method(cut_graph, **kwargs)
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
if replace_wire_cuts:
replace_wire_cut_nodes(cut_graph)
return cut_graph
|
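Stripped of seeding and trial bookkeeping, the CutStrategy branch above boils down to a probe-and-validate loop; a simplified sketch with placeholder helper names:

def probe_cuts(graph, cut_method, kwargs_candidates, is_valid):
    valid = {}
    for cut_kwargs in kwargs_candidates:               # e.g. from cut_strategy.get_cut_kwargs(graph)
        cut_edges = cut_method(graph, **cut_kwargs)
        if is_valid(graph, cut_edges, cut_kwargs):     # fragment the graph and check wire counts
            valid[cut_kwargs["num_fragments"]] = cut_edges
    if not valid:
        raise ValueError("Unable to find a circuit cutting that satisfies all constraints.")
    return valid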
1,902 |
def _gaussian_random_matrix(n_components, n_features, random_state=None):
"""Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
|
def _gaussian_random_matrix(n_components, n_features, random_state=None):
"""Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int or RandomState instance, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
|
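A short usage sketch of the matrix described above, showing the random-projection step it is meant for (plain numpy; the seed and sizes are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
n_components, n_features = 50, 1000
R = rng.normal(0.0, 1.0 / np.sqrt(n_components), size=(n_components, n_features))

X = rng.normal(size=(10, n_features))   # 10 sample vectors in the original space
X_proj = X @ R.T                        # shape (10, n_components); pairwise distances are roughly preserved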
20,756 |
def trim_line(line: str) -> Optional[str]:
# Discards all rows that are not a vertex ("v"), face ("f") or vertex texture ("v")
values = line.split()
if values[0] == "vt":
return trim_vertex_texture(values)
elif values[0] == "f":
return trim_face(values)
elif values[0] == "v":
return trim_vertex(values)
return
|
def trim_line(line: str) -> Optional[str]:
# Discards all rows that are not a vertex ("v"), face ("f") or vertex texture ("vt")
values = line.split()
if values[0] == "vt":
return trim_vertex_texture(values)
elif values[0] == "f":
return trim_face(values)
elif values[0] == "v":
return trim_vertex(values)
return
|
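A self-contained illustration of the same filtering idea on a few Wavefront .obj records, keeping only vertex ("v"), vertex-texture ("vt") and face ("f") lines:

sample = ["v 0.0 1.0 0.0", "vt 0.25 0.75", "vn 0 0 1", "f 1/1 2/2 3/3", "# a comment"]
kept = [line for line in sample if line.split() and line.split()[0] in ("v", "vt", "f")]
# kept == ["v 0.0 1.0 0.0", "vt 0.25 0.75", "f 1/1 2/2 3/3"]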
35,286 |
def fista(AtB, pseudo_inverse, x=None, n_iter_max=100, non_negative=True, gradient_step=None,
sparsity_coefficient=None):
"""
Fast Iterative Shrinkage Thresholding Algorithm (FISTA)
    Computes an approximate solution for the Ax = b linear system.
Parameters
----------
AtB: ndarray
Pre-computed product of the transposed of A and B.
pseudo_inverse: ndarray
Pre-computed product of the transposed of A and A.
x: initialized array
Default: None
n_iter_max : int
Maximum number of iteration
Default: 100
    non_negative : bool, default is True
if True, result will be non-negative
gradient_step : float
sparsity_coefficient : float or None
Returns
-------
x : Updated ndarray
Reference
----------
[1] : Beck, A., & Teboulle, M. (2009). A fast iterative
shrinkage-thresholding algorithm for linear inverse problems.
SIAM journal on imaging sciences, 2(1), 183-202.
"""
if sparsity_coefficient[-1] is None:
sparse = 0
else:
sparse = sparsity_coefficient[-1]
if gradient_step is None:
gradient_step = 0.001
if x is None:
x = tl.zeros([tl.shape(pseudo_inverse)[0], tl.shape(AtB)[1]])
# Parameters
momentum_old = tl.tensor(1.0)
norm_0 = 0.0
x_upd = tl.copy(x)
for iteration in range(n_iter_max):
gradient = - AtB + tl.tenalg.multi_mode_dot(x_upd, pseudo_inverse, transpose=False) + sparse
if non_negative is True:
delta_x = tl.where(gradient_step * gradient < x, gradient_step * gradient, x_upd)
else:
delta_x = gradient_step * gradient
xnew = x_upd - delta_x
momentum = (1 + tl.sqrt(1 + 4 * momentum_old ** 2)) / 2
x_upd = xnew + ((momentum_old - 1) / momentum) * (xnew - x)
momentum_old = momentum
x = tl.copy(xnew)
norm = tl.norm(delta_x)
if iteration == 1:
norm_0 = norm
if norm < 0.01 * norm_0:
break
return x
|
def fista(AtB, pseudo_inverse, x=None, n_iter_max=100, non_negative=True, gradient_step=None,
sparsity_coefficient=None):
"""
Fast Iterative Shrinkage Thresholding Algorithm (FISTA)
    Computes an approximate solution for the Ax = b linear system.
Parameters
----------
AtB: ndarray
Pre-computed product of the transposed of A and B.
pseudo_inverse: ndarray
Pre-computed product of the transposed of A and A.
x: initialized array
Default: None
n_iter_max : int
Maximum number of iteration
Default: 100
    non_negative : bool, default is True
if True, result will be non-negative
gradient_step : float
sparsity_coefficient : float or None
Returns
-------
x : approximate solution such that Ax = b
Reference
----------
[1] : Beck, A., & Teboulle, M. (2009). A fast iterative
shrinkage-thresholding algorithm for linear inverse problems.
SIAM journal on imaging sciences, 2(1), 183-202.
"""
if sparsity_coefficient[-1] is None:
sparse = 0
else:
sparse = sparsity_coefficient[-1]
if gradient_step is None:
gradient_step = 0.001
if x is None:
x = tl.zeros([tl.shape(pseudo_inverse)[0], tl.shape(AtB)[1]])
# Parameters
momentum_old = tl.tensor(1.0)
norm_0 = 0.0
x_upd = tl.copy(x)
for iteration in range(n_iter_max):
gradient = - AtB + tl.tenalg.multi_mode_dot(x_upd, pseudo_inverse, transpose=False) + sparse
if non_negative is True:
delta_x = tl.where(gradient_step * gradient < x, gradient_step * gradient, x_upd)
else:
delta_x = gradient_step * gradient
xnew = x_upd - delta_x
momentum = (1 + tl.sqrt(1 + 4 * momentum_old ** 2)) / 2
x_upd = xnew + ((momentum_old - 1) / momentum) * (xnew - x)
momentum_old = momentum
x = tl.copy(xnew)
norm = tl.norm(delta_x)
if iteration == 1:
norm_0 = norm
if norm < 0.01 * norm_0:
break
return x
|
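For reference, the FISTA iteration the docstring cites (gradient step, nonnegative projection, Nesterov momentum) written out for the plain matrix problem min ||Ax - b||^2 with x >= 0; a numpy sketch, not the tensorly routine:

import numpy as np

def fista_nnls(A, b, n_iter=200, step=None):
    AtA, Atb = A.T @ A, A.T @ b
    if step is None:
        step = 1.0 / np.linalg.norm(AtA, 2)            # 1 / Lipschitz constant of the gradient
    x = np.zeros(A.shape[1])
    y, t = x.copy(), 1.0
    for _ in range(n_iter):
        grad = AtA @ y - Atb
        x_new = np.maximum(y - step * grad, 0.0)       # gradient step + projection onto x >= 0
        t_new = (1 + np.sqrt(1 + 4 * t * t)) / 2
        y = x_new + ((t - 1) / t_new) * (x_new - x)    # momentum extrapolation
        x, t = x_new, t_new
    return x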
31,770 |
def create_member(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
accountDetails = []
account = {'AccountId': args.get('accountId'), 'Email': args.get('email')}
accountDetails.append(account)
response = client.create_members(
DetectorId=args.get('detectorId'),
AccountDetails=accountDetails
)
unprocessed_accounts = response.get('UnprocessedAccounts', [])
ec = {"AWS.GuardDuty.CreateMember.UnprocessedAccounts": unprocessed_accounts} \
if unprocessed_accounts else None
return create_entry('AWS GuardDuty Create Member', unprocessed_accounts, ec)
except Exception as e:
return raise_error(e)
|
def create_member(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
account_details = [{'AccountId': args.get('accountId'), 'Email': args.get('email')}]
response = client.create_members(
DetectorId=args.get('detectorId'),
AccountDetails=accountDetails
)
unprocessed_accounts = response.get('UnprocessedAccounts', [])
ec = {"AWS.GuardDuty.CreateMember.UnprocessedAccounts": unprocessed_accounts} \
if unprocessed_accounts else None
return create_entry('AWS GuardDuty Create Member', unprocessed_accounts, ec)
except Exception as e:
return raise_error(e)
|
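For comparison, the same GuardDuty call with the local list and the keyword argument kept under one name (boto3-style client as in the snippet above; still a sketch):

account_details = [{'AccountId': args.get('accountId'), 'Email': args.get('email')}]
response = client.create_members(
    DetectorId=args.get('detectorId'),
    AccountDetails=account_details,
)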
35,283 |
def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-7,
sparsity_coefficients=[], fixed_modes=[],hals='approx',
verbose=False, return_errors=False):
"""
Non-negative CP decomposition
    Uses HALS which updates each factor columnwise, fixing every other column, see [1]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
        Default: 1e-7
sparsity_coefficients: array of float (of length the number of modes)
The sparsity coefficients on each factor.
If set to None, the algorithm is computed without sparsity
Default: [],
fixed_modes: array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: []
verbose: boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors: boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
toc: list
A list with accumulated time at each iterations
fixed_modes = [], normalize = [False, False, False],
verbose = True, return_errors = False)
References
----------
[1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and
Hierarchical ALS Algorithms for Nonnegative Matrix Factorization,
Neural Computation 24 (4): 1085-1105, 2012.
"""
weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd,
random_state=None,
normalize_factors=False)
norm_tensor = tl.norm(tensor, 2)
nb_modes = len(tensor.shape)
if sparsity_coefficients == None or len(sparsity_coefficients) != nb_modes:
#print(
# "Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None.")
sparsity_coefficients = [None for i in range(nb_modes)]
if fixed_modes == None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
    # initialisation - declare local variables
    rec_errors = []
    # Iteration
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes_list:
# Computing Hadamard of cross-products
pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor))
for i, factor in enumerate(factors):
if i != mode:
pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor)
if not iteration and weights is not None:
# Take into account init weights
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
else:
mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)
# Call the hals resolution with nnls, optimizing the current mode
if hals=='approx':
factors[mode] = tl.transpose(
hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0])
elif hals=='exact':
factors[mode] = tl.transpose(
hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=5000)[0])
if tol:
factors_norm = cp_norm((weights, factors))
iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
|
def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-7,
sparsity_coefficients=[], fixed_modes=[],hals='approx',
verbose=False, return_errors=False):
"""
Non-negative CP decomposition
    Uses HALS which updates each factor columnwise, fixing every other column, see [1]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
        Default: 1e-7
sparsity_coefficients: array of float (of length the number of modes)
The sparsity coefficients on each factor.
If set to None, the algorithm is computed without sparsity
Default: [],
fixed_modes: array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: []
verbose: boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors: boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
toc: list
A list with accumulated time at each iterations
fixed_modes = [], normalize = [False, False, False],
verbose = True, return_errors = False)
References
----------
[1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and
Hierarchical ALS Algorithms for Nonnegative Matrix Factorization,
Neural Computation 24 (4): 1085-1105, 2012.
"""
weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd,
random_state=None,
normalize_factors=False)
norm_tensor = tl.norm(tensor, 2)
nb_modes = len(tensor.shape)
if sparsity_coefficients == None or len(sparsity_coefficients) != nb_modes:
#print(
# "Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None.")
sparsity_coefficients = [None for i in range(nb_modes)]
if fixed_modes == None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes = [mode for mode in range(n_modes) if mode not in fixed_modes]
    # initialisation - declare local variables
    rec_errors = []
    # Iteration
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes_list:
# Computing Hadamard of cross-products
pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor))
for i, factor in enumerate(factors):
if i != mode:
pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor)
if not iteration and weights is not None:
# Take into account init weights
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
else:
mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)
# Call the hals resolution with nnls, optimizing the current mode
if hals=='approx':
factors[mode] = tl.transpose(
hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0])
elif hals=='exact':
factors[mode] = tl.transpose(
hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=5000)[0])
if tol:
factors_norm = cp_norm((weights, factors))
iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
|
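The column-wise HALS update the docstring refers to can be written compactly for the matrix case min ||M - U V||_F^2 with V >= 0; a plain numpy sketch, not the tensorly solver:

import numpy as np

def hals_pass(V, UtM, UtU, eps=1e-12):
    """One pass over the rows of V, holding the others fixed.
    V is the current (rank, n) estimate, UtM = U.T @ M, UtU = U.T @ U."""
    rank = UtU.shape[0]
    for k in range(rank):
        residual = UtM[k, :] - UtU[k, :] @ V + UtU[k, k] * V[k, :]
        V[k, :] = np.maximum(residual / max(UtU[k, k], eps), 0.0)
    return V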
32,253 |
def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999
):
""" Make a packs installation request.
    If a pack fails to install due to a malformed pack, this function catches the corrupted pack and calls another
request to install packs again, this time without the corrupted pack.
If a pack fails to install due to timeout when sending a request to GCP,
request to install all packs again once more.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
"""
class GCPTimeOutException(ApiException):
def __init__(self, error):
if '/packs/' in error:
self.pack_id = error.split('/packs/')[1].split('.zip')[0].split('/')[0] # TODO: regex?
super().__init__()
class MalformedPackException(ApiException):
def __init__(self, pack_ids):
self.malformed_ids = pack_ids
super().__init__()
def call_install_packs_request(packs):
try:
logging.debug(f'Installing the following packs on server {host}:\n{[pack["id"] for pack in packs]}')
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body={'packs': packs,
'ignoreWarnings': True},
accept='application/json',
_request_timeout=request_timeout)
if status_code in range(200, 300):
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for pack in
ast.literal_eval(response_data)]
logging.success(f'Packs were successfully installed on server {host}')
logging.debug(f'The packs that were successfully installed on server {host}:\n{packs_data}')
except ApiException as ex:
if 'timeout awaiting response' in ex.body:
raise GCPTimeOutException(ex.body)
if malformed_ids := find_malformed_pack_id(ex.body):
raise MalformedPackException(malformed_ids)
raise ex
try:
logging.info(f'Installing packs on server {host}')
logging.info(f'TESTING: adding failing pack to pack list to create failure')
packs_to_install.append({'id': 'PhishAI', 'version': '1.0.0'}) # TODO: remove failing pack!
try:
call_install_packs_request(packs_to_install)
except MalformedPackException as e:
# if this is malformed pack error, remove malformed packs and retry until success
handle_malformed_pack_ids(e.malformed_ids, packs_to_install)
logging.warning(f'The request to install packs on server {host} has failed, retrying without packs '
f'{e.malformed_ids}')
return install_packs(client, host, [pack for pack in packs_to_install if pack['id'] not in e.malformed_ids],
request_timeout)
except GCPTimeOutException as e:
# if this is a gcp timeout, try only once more
logging.warning(f'The request to install packs on server {host} has failed due to timeout awaiting response'
f' headers while trying to install pack {e.pack_id}, trying again for one time')
call_install_packs_request(packs_to_install)
except Exception as e:
logging.exception(f'The request to install packs has failed. Additional info: {str(e)}')
global SUCCESS_FLAG
SUCCESS_FLAG = False
finally:
return SUCCESS_FLAG
|
def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999,
):
""" Make a packs installation request.
    If a pack fails to install due to a malformed pack, this function catches the corrupted pack and calls another
request to install packs again, this time without the corrupted pack.
If a pack fails to install due to timeout when sending a request to GCP,
request to install all packs again once more.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
"""
class GCPTimeOutException(ApiException):
def __init__(self, error):
if '/packs/' in error:
self.pack_id = error.split('/packs/')[1].split('.zip')[0].split('/')[0] # TODO: regex?
super().__init__()
class MalformedPackException(ApiException):
def __init__(self, pack_ids):
self.malformed_ids = pack_ids
super().__init__()
def call_install_packs_request(packs):
try:
logging.debug(f'Installing the following packs on server {host}:\n{[pack["id"] for pack in packs]}')
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body={'packs': packs,
'ignoreWarnings': True},
accept='application/json',
_request_timeout=request_timeout)
if status_code in range(200, 300):
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for pack in
ast.literal_eval(response_data)]
logging.success(f'Packs were successfully installed on server {host}')
logging.debug(f'The packs that were successfully installed on server {host}:\n{packs_data}')
except ApiException as ex:
if 'timeout awaiting response' in ex.body:
raise GCPTimeOutException(ex.body)
if malformed_ids := find_malformed_pack_id(ex.body):
raise MalformedPackException(malformed_ids)
raise ex
try:
logging.info(f'Installing packs on server {host}')
logging.info(f'TESTING: adding failing pack to pack list to create failure')
packs_to_install.append({'id': 'PhishAI', 'version': '1.0.0'}) # TODO: remove failing pack!
try:
call_install_packs_request(packs_to_install)
except MalformedPackException as e:
# if this is malformed pack error, remove malformed packs and retry until success
handle_malformed_pack_ids(e.malformed_ids, packs_to_install)
logging.warning(f'The request to install packs on server {host} has failed, retrying without packs '
f'{e.malformed_ids}')
return install_packs(client, host, [pack for pack in packs_to_install if pack['id'] not in e.malformed_ids],
request_timeout)
except GCPTimeOutException as e:
# if this is a gcp timeout, try only once more
logging.warning(f'The request to install packs on server {host} has failed due to timeout awaiting response'
f' headers while trying to install pack {e.pack_id}, trying again for one time')
call_install_packs_request(packs_to_install)
except Exception as e:
logging.exception(f'The request to install packs has failed. Additional info: {str(e)}')
global SUCCESS_FLAG
SUCCESS_FLAG = False
finally:
return SUCCESS_FLAG
|
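The retry policy above (one extra attempt on a timeout, drop-and-recurse on malformed packs) reduces to a small generic pattern; exception types and helper names below are placeholders:

def install_with_retry(install, items, find_bad_ids):
    try:
        install(items)
    except TimeoutError:
        install(items)                                           # a single extra attempt
    except ValueError as exc:
        bad = set(find_bad_ids(exc))
        remaining = [item for item in items if item["id"] not in bad]
        install_with_retry(install, remaining, find_bad_ids)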
9,206 |
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
The mask images for the Roman pupil plane are available at from the Roman Reference Information
page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
    There are separate files for each SCA, since the view of the spider pattern varies somewhat
    across the field of view of the wide field camera.  Furthermore, the effect of the obscuration
    is somewhat different at longer wavelengths, so F184 has a different set of files than the
    other filters.  cf. the ``galsim.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images.
To avoid using the full pupil plane configuration, use the optional keyword ``pupil_bin``.
The full pupil-plane images are 4096 x 4096, which is more detail than is typically needed for
most applications. The default binning is 4x4, which results in an image that is 1024 x 1024.
This provides enough detail for most purposes and is much faster to render than using the full
pupil plane image. Using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly
reasonable results and is even faster to render, but it is not recommended to use higher
binning than that, as the diffraction spikes will be noticeably degraded.
Also note that currently the orientation of the struts is fixed, rather than rotating depending
on the orientation of the focal plane. Rotation of the PSF can easily be affected by the user
via::
psf = galsim.roman.getPSF(...).rotate(angle)
which will rotate the entire PSF (including the diffraction spikes and all other features).
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from . import n_pix, n_sca, longwave_bands, shortwave_bands
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
return psf
|
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
The mask images for the Roman pupil plane are available at from the Roman Reference Information
page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
There are separate files for each SCA, since the view of the spider pattern varies somwhat
across the field of view of the wide field camera. Furthermore, the effect of the obscuration
is somewhat different at longer wavelengths, so F184 has a different set of files than the
other filters. cf. the ``galsm.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images.
To avoid using the full pupil plane configuration, use the optional keyword ``pupil_bin``.
The full pupil-plane images are 4096 x 4096, which is more detail than is typically needed for
most applications. The default binning is 4x4, which results in an image that is 1024 x 1024.
This provides enough detail for most purposes and is much faster to render than using the full
pupil plane image. Using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly
reasonable results and is even faster to render, but it is not recommended to use higher
binning than that, as the diffraction spikes will be noticeably degraded.
Also note that currently the orientation of the struts is fixed, rather than rotating depending
on the orientation of the focal plane. Rotation of the PSF can easily be affected by the user
via::
psf = galsim.roman.getPSF(...).rotate(angle)
which will rotate the entire PSF (including the diffraction spikes and all other features).
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from . import n_pix, n_sca, longwave_bands, shortwave_bands
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
return psf
|
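A minimal usage sketch of the getPSF interface documented above, assuming a GalSim installation that provides the galsim.roman module; the SCA number, band name, and rotation angle are arbitrary illustrative choices, not values taken from the entry itself:
import galsim
import galsim.roman as roman
# Chromatic PSF for SCA 7 in the Y106 band, at the SCA center (SCA_pos=None).
psf = roman.getPSF(7, 'Y106', pupil_bin=4)
# Higher-accuracy variant for very bright stars, following the docstring guidance:
# a smaller folding_threshold and finer pupil sampling.
gsp = galsim.GSParams(folding_threshold=2.e-3)
psf_bright = roman.getPSF(7, 'Y106', pupil_bin=2, gsparams=gsp)
# Achromatic PSF at a single wavelength, given in nanometers.
psf_mono = roman.getPSF(7, 'Y106', wavelength=1293.0)
# Rotate the whole PSF (diffraction spikes and all), as noted in the docstring.
psf_rotated = psf.rotate(30 * galsim.degrees)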
57,751 |
def poll_field(args: Dict[str, Any]) -> Tuple[str, dict, dict]:
key = args.get('key', '').split(".")
regex = args.get('regex')
ignore_case = argToBoolean(args.get('ignore_case', 'False'))
regex_ignore_case_flag = re.IGNORECASE if ignore_case else 0
regex = re.compile(regex, regex_ignore_case_flag) if regex else None
context = demisto.context()
for k in key:
try:
context = context.get(k, {})
except Exception:
context = None
break
data = {
'key': '.'.join(key),
'exists': False
}
if context:
data['exists'] = check_key(context, regex)
context_value = {
'CheckContextKey(val.key == obj.key)': data
}
human_readable = 'The key exists.' if data['exists'] else 'The key does not exist.'
return human_readable, context_value, data
|
def poll_field(args: Dict[str, Any]) -> Tuple[str, dict, dict]:
key = args.get('key', '').split(".")
regex = args.get('regex')
ignore_case = argToBoolean(args.get('ignore_case', 'False'))
regex_ignore_case_flag = re.IGNORECASE if ignore_case else 0
regex = re.compile(regex, regex_ignore_case_flag) if regex else None
context = demisto.context()
for k in key:
try:
context = context.get(k, {})
except Exception:
context = None
break
data = {
'key': '.'.join(key),
'exists': check_key(context, regex)
}
context_value = {
'CheckContextKey(val.key == obj.key)': data
}
human_readable = 'The key exists.' if data['exists'] else 'The key does not exist.'
return human_readable, context_value, data
|
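The dotted-key traversal in poll_field reduces to a plain nested-dict walk; a standalone sketch of that piece, independent of demisto and using a hypothetical context, looks like this:
# Standalone sketch of the dotted-key lookup used in poll_field above.
# The sample context below is hypothetical.
context = {'Incident': {'Labels': {'Source': 'mail'}}}
node = context
for k in 'Incident.Labels.Source'.split('.'):
    try:
        node = node.get(k, {})
    except Exception:
        node = None
        break
print(node)  # -> 'mail'; a missing key leaves {} instead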
58,277 |
def test_subnetwork_unused_output():
with make_scope() as session:
net_dict = {
'sub': {
'class': 'subnetwork',
'from': [],
'subnetwork': {'linear': {'class': 'linear', 'from': 'base:data:data', 'n_out': 1},
'linear_0': {'class': 'linear', 'from': 'linear', 'n_out': 1},
'output': {'class': 'copy', 'from': 'linear'}}},
'linear': {'class': 'linear', 'from': ['sub/linear', 'sub/linear_0'], 'n_out': 1},
'output': {'class': 'copy', 'from': 'linear'}}
config = Config()
config.update(dict(num_inputs=1, num_outputs=1))
network = TFNetwork(config=config, train_flag=True)
network.construct_from_dict(net_dict)
|
def test_subnetwork_unused_output():
with make_scope() as session:
net_dict = {
'sub': {
'class': 'subnetwork',
'from': [],
'subnetwork': {
'linear': {'class': 'linear', 'from': 'base:data:data', 'n_out': 1},
'linear_0': {'class': 'linear', 'from': 'linear', 'n_out': 1},
'output': {'class': 'copy', 'from': 'linear'}}},
'linear': {'class': 'linear', 'from': ['sub/linear', 'sub/linear_0'], 'n_out': 1},
'output': {'class': 'copy', 'from': 'linear'}}
config = Config()
config.update(dict(num_inputs=1, num_outputs=1))
network = TFNetwork(config=config, train_flag=True)
network.construct_from_dict(net_dict)
|
55,648 |
def conv_distance_transform(
image: torch.Tensor,
kernel_size: int = 7
) -> torch.Tensor:
r"""Approximates the Manhattan distance transform of images using convolutions.
The value at each pixel in the output represents the distance to the nearest non-zero pixel in the image image.
The transformation is applied independently across the channel dimension of the images.
Args:
image: Image with shape :math:`(B,C,H,W)`.
kernel_size: size of the convolution kernel. Larger kernels are more accurate but less numerically stable.
Returns:
tensor with shape :math:`(B,C,H,W)`.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"image type is not a torch.Tensor. Got {type(image)}")
if not len(image.shape) == 4:
raise ValueError(f"Invalid image shape, we expect BxCxHxW. Got: {image.shape}")
if kernel_size % 2 == 0:
raise ValueError("Kernel size must be an odd number.")
device: torch.device = image.device
n_iters = math.ceil(max(image.shape[2], image.shape[3]) / math.floor(kernel_size / 2))
kernel = make_cdt_kernel(kernel_size)
out = torch.zeros(image.shape, dtype=torch.float32, device=device)
# It is possible to avoid cloning the image if boundary = image, but this would require modifying the image tensor.
boundary = image.clone().to(torch.float32)
kernel = kernel.to(device)
# If image images have multiple channels, view the channels in the batch dimension to match kernel shape.
if image.shape[1] > 1:
batch_channel_view_shape = (image.shape[0] * image.shape[1], 1, image.shape[2], image.shape[3])
out = out.view(*batch_channel_view_shape)
boundary = boundary.view(*batch_channel_view_shape)
for i in range(n_iters):
cdt = F.conv2d(boundary, kernel, padding='same')
cdt = -0.35 * torch.log(cdt)
# We are calculating log(0) above.
cdt = torch.nan_to_num(cdt, posinf=0.0)
mask = cdt > 0
if mask.sum() == 0:
break
offset = i * kernel_size / 2
out[mask] += offset + cdt[mask]
boundary[mask] = 1
# View channels in the channel dimension, if they were added to batch dimension during transform.
if image.shape[1] > 1:
out = out.view(image.shape)
return out
|
def conv_distance_transform(
image: torch.Tensor,
kernel_size: int = 7
) -> torch.Tensor:
r"""Approximates the Manhattan distance transform of images using convolutions.
The value at each pixel in the output represents the distance to the nearest non-zero pixel in the image image.
The transformation is applied independently across the channel dimension of the images.
Args:
offset: int = i * kernel_size / 2
image: Image with shape :math:`(B,C,H,W)`.
kernel_size: size of the convolution kernel. Larger kernels are more accurate but less numerically stable.
Returns:
tensor with shape :math:`(B,C,H,W)`.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"image type is not a torch.Tensor. Got {type(image)}")
if not len(image.shape) == 4:
raise ValueError(f"Invalid image shape, we expect BxCxHxW. Got: {image.shape}")
if kernel_size % 2 == 0:
raise ValueError("Kernel size must be an odd number.")
device: torch.device = image.device
n_iters = math.ceil(max(image.shape[2], image.shape[3]) / math.floor(kernel_size / 2))
kernel = make_cdt_kernel(kernel_size)
out = torch.zeros(image.shape, dtype=torch.float32, device=device)
# It is possible to avoid cloning the image if boundary = image, but this would require modifying the image tensor.
boundary = image.clone().to(torch.float32)
kernel = kernel.to(device)
# If image images have multiple channels, view the channels in the batch dimension to match kernel shape.
if image.shape[1] > 1:
batch_channel_view_shape = (image.shape[0] * image.shape[1], 1, image.shape[2], image.shape[3])
out = out.view(*batch_channel_view_shape)
boundary = boundary.view(*batch_channel_view_shape)
for i in range(n_iters):
cdt = F.conv2d(boundary, kernel, padding='same')
cdt = -0.35 * torch.log(cdt)
# We are calculating log(0) above.
cdt = torch.nan_to_num(cdt, posinf=0.0)
mask = cdt > 0
if mask.sum() == 0:
break
offset = i * kernel_size / 2
out[mask] += offset + cdt[mask]
boundary[mask] = 1
# View channels in the channel dimension, if they were added to batch dimension during transform.
if image.shape[1] > 1:
out = out.view(image.shape)
return out
|
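A toy invocation of the transform above, assuming conv_distance_transform and its helper make_cdt_kernel are in scope (the single-seed input image is an arbitrary illustration):
import torch
# Binary image with one non-zero pixel; output distances grow away from it.
img = torch.zeros(1, 1, 32, 32)
img[0, 0, 16, 16] = 1.0
dist = conv_distance_transform(img, kernel_size=7)
print(dist.shape)         # torch.Size([1, 1, 32, 32])
print(float(dist.max()))  # largest approximate Manhattan distance in the image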
32,212 |
def main():
params = demisto.params()
proxies = handle_proxy()
verify_certificate = not params.get('insecure', False)
url = params.get('InstanceURL')
credentials = params.get('credentials')
username = credentials.get('identifier')
password = credentials.get('password')
client_id = params.get('clientID')
client_secret = params.get('clientSecret')
object_name = params.get('object')
key_field = params.get('key_field')
query_filter = params.get('filter', None)
ignore_last_modified = params.get('ignore_last_modified')
fields = params.get('fields', None)
history = params.get('indicator_history', 365)
reputation = params.get('feedReputation', 'None')
command = demisto.command()
client = Client(url, username, password, client_id, client_secret, object_name, key_field,
query_filter, fields, history, ignore_last_modified, verify_certificate, proxies, reputation)
if command == 'test-module':
test_module(client)
elif command == 'fetch-indicators':
fetch_indicators_command(client)
elif command == 'salesforce-get-indicators':
fetch_indicators_command(client, manual_run=True)
|
def main():
params = demisto.params()
proxies = handle_proxy()
verify_certificate = not params.get('insecure', False)
url = params.get('InstanceURL')
credentials = params.get('credentials')
username = credentials.get('identifier')
password = credentials.get('password')
client_id = params.get('clientID')
client_secret = params.get('clientSecret')
object_name = params.get('object')
key_field = params.get('key_field')
query_filter = params.get('filter', None)
ignore_last_modified = params.get('ignore_last_modified', False)
fields = params.get('fields', None)
history = params.get('indicator_history', 365)
reputation = params.get('feedReputation', 'None')
command = demisto.command()
client = Client(url, username, password, client_id, client_secret, object_name, key_field,
query_filter, fields, history, ignore_last_modified, verify_certificate, proxies, reputation)
if command == 'test-module':
test_module(client)
elif command == 'fetch-indicators':
fetch_indicators_command(client)
elif command == 'salesforce-get-indicators':
fetch_indicators_command(client, manual_run=True)
|
1,818 |
def _unique(values, *, return_inverse=False):
"""Helper function to find uniques with support for python objects.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
unique : ndarray
The sorted uniique values
unique_inverse : ndarray
The indicies to reconstruct the original array from the unique array.
Only provided if `return_inverse` is True.
"""
if values.dtype == object:
return _unique_python(values, return_inverse=return_inverse)
# numerical
return np.unique(values, return_inverse=return_inverse)
|
def _unique(values, *, return_inverse=False):
"""Helper function to find unique values with support for python objects.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
unique : ndarray
The sorted uniique values
unique_inverse : ndarray
The indicies to reconstruct the original array from the unique array.
Only provided if `return_inverse` is True.
"""
if values.dtype == object:
return _unique_python(values, return_inverse=return_inverse)
# numerical
return np.unique(values, return_inverse=return_inverse)
|
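For non-object dtypes the helper above defers directly to numpy; a quick illustration of the return values its docstring describes:
import numpy as np
uniques, inverse = np.unique(np.array([3, 1, 3, 2]), return_inverse=True)
print(uniques)           # [1 2 3]
print(inverse)           # [2 0 2 1]
print(uniques[inverse])  # reconstructs the original array [3 1 3 2]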
10,999 |
def _named_row_unpickle(names, values):
return NamedValuesListIterable.create_namedtuple_class(*names)._make(values)
|
def _named_row_unpickle(names, values):
return NamedValuesListIterable.create_namedtuple_class(*names)(*values)
|
6,666 |
def convert_to_seconds(value, unit):
seconds = 0
if value == 0 or not value:
return seconds
if unit == 'Hours':
seconds = value * 3600
if unit == 'Minutes':
seconds = value * 60
return seconds
|
def convert_to_seconds(value, unit):
seconds = 0
if not value:
return seconds
if unit == 'Hours':
seconds = value * 3600
if unit == 'Minutes':
seconds = value * 60
return seconds
|
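A few illustrative calls, assuming the convert_to_seconds function above is in scope; the original and modified versions give the same results for these inputs:
assert convert_to_seconds(2, 'Hours') == 7200
assert convert_to_seconds(90, 'Minutes') == 5400
assert convert_to_seconds(0, 'Hours') == 0
assert convert_to_seconds(None, 'Minutes') == 0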
6,000 |
def _get_broadcasted_binary_op_result(obj1, obj2, cq, dtype_getter=None):
if dtype_getter is None:
dtype_getter = _get_common_dtype
if obj1.shape == obj2.shape:
return obj1._new_like_me(dtype_getter(obj1, obj2, cq),
cq)
elif obj1.shape == ():
return obj2._new_like_me(dtype_getter(obj1, obj2, cq),
cq)
elif obj2.shape == ():
return obj1._new_like_me(dtype_getter(obj1, obj2, cq),
cq)
else:
raise NotImplementedError("Broadcasting binary op with shapes:"
f" {obj1.shape}, {obj2.shape}.")
|
def _get_broadcasted_binary_op_result(obj1, obj2, cq, dtype_getter=None):
if dtype_getter is None:
dtype_getter = _get_common_dtype
if obj1.shape == obj2.shape:
return obj1._new_like_me(dtype_getter(obj1, obj2, cq),
cq)
elif obj1.shape == ():
return obj2._new_like_me(dtype_getter(obj1, obj2, cq),
cq)
elif obj2.shape == ():
return obj1._new_like_me(dtype_getter(obj1, obj2, cq),
cq)
else:
raise NotImplementedError("Broadcasting binary operator with shapes:"
f" {obj1.shape}, {obj2.shape}.")
|
21,174 |
def _contains_multiple_underscores(package_name: str) -> bool:
permitted_match = re.search(r"_{2,}", package_name, re.IGNORECASE)
return permitted_match is not None
|
def _contains_multiple_underscores(package_name: str) -> bool:
return "__" in package_name
|
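The simplification above rests on the fact that any run of two or more underscores necessarily contains the substring "__", so the regex and substring checks agree; a quick check with hypothetical package names:
import re
def regex_version(name: str) -> bool:
    return re.search(r"_{2,}", name) is not None
def substring_version(name: str) -> bool:
    return "__" in name
for name in ['my_pkg', 'my__pkg', 'a___b', 'plain']:
    assert regex_version(name) == substring_version(name)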
53,194 |
def assert_non_cloud_metrics(aggregator):
"""Certain metrics cannot be collected in cloud environments due to disabled commands"""
tags = ['redis_host:{}'.format(common.HOST), 'redis_port:6382', 'redis_role:master']
aggregator.assert_metric('redis.net.connections', count=2, tags=tags + ['source:unknown'])
aggregator.assert_metric('redis.net.maxclients', count=2, tags=tags)
|
def assert_non_cloud_metrics(aggregator, tags):
"""Certain metrics cannot be collected in cloud environments due to disabled commands"""
aggregator.assert_metric('redis.net.connections', count=2, tags=tags + ['source:unknown'])
aggregator.assert_metric('redis.net.maxclients', count=2, tags=tags)
|