Columns: id (int64, range 11 to 59.9k), original (string, length 33 to 150k), modified (string, length 37 to 150k).
54,507
def run(args: argparse.Namespace) -> None: kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako") subprocess.run(f"{kurobako_cmd} --version", shell=True) if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)): raise ValueError(f"Data directory {args.data_dir} cannot be found.") os.makedirs(args.out_dir, exist_ok=True) study_json_fn = os.path.join(args.out_dir, "studies.json") subprocess.check_call(f"echo >| {study_json_fn}", shell=True) solvers_filename = os.path.join(args.out_dir, "solvers.json") subprocess.check_call(f"echo >| {solvers_filename}", shell=True) problems_filename = os.path.join(args.out_dir, "problems.json") subprocess.check_call(f"echo >| {problems_filename}", shell=True) # Create ZDT problems cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}" subprocess.run(cmd, shell=True) # Create NAS bench problem(C) (for Multi-Objective Settings). dataset = os.path.join(args.data_dir, "nasbench_full.bin") cmd = ( f'{kurobako_cmd} problem nasbench "{dataset}"' f"--encoding C --metrics accuracy params | tee -a {problems_filename}" ) subprocess.run(cmd, shell=True) # Create solvers. sampler_list = args.sampler_list.split() sampler_kwargs_list = args.sampler_kwargs_list.split() if len(sampler_list) != len(sampler_kwargs_list): raise ValueError( "The number of samplers does not match the given keyword arguments. \n" f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}." ) for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list): name = f"{args.name_prefix}_{sampler}" python_command = f"mo_runner.py {sampler} {sampler_kwargs}" cmd = ( f"{kurobako_cmd} solver --name {name} command python {python_command}" f"| tee -a {solvers_filename}" ) subprocess.run(cmd, shell=True) # Create study. cmd = ( f"{kurobako_cmd} studies --budget 1000 " f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) " f"--repeats {args.n_runs} --seed {args.seed} " f"> {study_json_fn}" ) subprocess.run(cmd, shell=True) result_filename = os.path.join(args.out_dir, "results.json") cmd = ( f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} " f"> {result_filename}" ) subprocess.run(cmd, shell=True) # Report report_filename = os.path.join(args.out_dir, "report.md") cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}" subprocess.run(cmd, shell=True) # Plot pareto-front. problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"] for problem_name in problem_names: cmd = ( f"cat {result_filename} | grep {problem_name} | " f"{kurobako_cmd} plot pareto-front -o {args.out_dir}" ) subprocess.run(cmd, shell=True)
def run(args: argparse.Namespace) -> None: kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako") subprocess.run(f"{kurobako_cmd} --version", shell=True) if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)): raise ValueError(f"Data directory {args.data_dir} cannot be found.") os.makedirs(args.out_dir, exist_ok=True) study_json_filename = os.path.join(args.out_dir, "studies.json") subprocess.check_call(f"echo >| {study_json_filename}", shell=True) solvers_filename = os.path.join(args.out_dir, "solvers.json") subprocess.check_call(f"echo >| {solvers_filename}", shell=True) problems_filename = os.path.join(args.out_dir, "problems.json") subprocess.check_call(f"echo >| {problems_filename}", shell=True) # Create ZDT problems cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}" subprocess.run(cmd, shell=True) # Create NAS bench problem(C) (for Multi-Objective Settings). dataset = os.path.join(args.data_dir, "nasbench_full.bin") cmd = ( f'{kurobako_cmd} problem nasbench "{dataset}"' f"--encoding C --metrics accuracy params | tee -a {problems_filename}" ) subprocess.run(cmd, shell=True) # Create solvers. sampler_list = args.sampler_list.split() sampler_kwargs_list = args.sampler_kwargs_list.split() if len(sampler_list) != len(sampler_kwargs_list): raise ValueError( "The number of samplers does not match the given keyword arguments. \n" f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}." ) for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list): name = f"{args.name_prefix}_{sampler}" python_command = f"mo_runner.py {sampler} {sampler_kwargs}" cmd = ( f"{kurobako_cmd} solver --name {name} command python {python_command}" f"| tee -a {solvers_filename}" ) subprocess.run(cmd, shell=True) # Create study. cmd = ( f"{kurobako_cmd} studies --budget 1000 " f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) " f"--repeats {args.n_runs} --seed {args.seed} " f"> {study_json_fn}" ) subprocess.run(cmd, shell=True) result_filename = os.path.join(args.out_dir, "results.json") cmd = ( f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} " f"> {result_filename}" ) subprocess.run(cmd, shell=True) # Report report_filename = os.path.join(args.out_dir, "report.md") cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}" subprocess.run(cmd, shell=True) # Plot pareto-front. problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"] for problem_name in problem_names: cmd = ( f"cat {result_filename} | grep {problem_name} | " f"{kurobako_cmd} plot pareto-front -o {args.out_dir}" ) subprocess.run(cmd, shell=True)
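The modified version of this row renames study_json_fn to study_json_filename but still reads study_json_fn in the study-creation and run commands, so it would fail with a NameError if executed. In both versions, adjacent f-string literals also concatenate without a separating space in the nasbench and solver commands. A minimal sketch of that pitfall, with a hypothetical path rather than the benchmark's own arguments:

# Adjacent string literals concatenate with no implicit separator, so shell
# commands assembled from several f-strings need explicit trailing spaces.
dataset = "/data/nasbench_full.bin"  # hypothetical path
broken = (
    f'kurobako problem nasbench "{dataset}"'
    f"--encoding C --metrics accuracy params"
)
fixed = (
    f'kurobako problem nasbench "{dataset}" '  # note the trailing space
    f"--encoding C --metrics accuracy params"
)
print(broken)  # ..."/data/nasbench_full.bin"--encoding C ...
print(fixed)   # ..."/data/nasbench_full.bin" --encoding C ...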
33,113
def rk4(f, x, t, dt, order=4): """Runge-Kutta (explicit, non-adaptive) numerical ODE solvers. Parameters ---------- f : function The forcing of the ODE must a function of the form f(t, x) x : ndarray or float State vector of the forcing term t : float Starting time of the integration dt : float Time step interval of the ODE solver order : int, optional The order of RK method. Default: 4 Returns ------- ndarray State vector at the new time step t+dt """ if order >=1: k1 = dt * f(t , x) # noqa if order >=2: k2 = dt * f(t+dt/2, x+k1/2) # noqa if order ==3: k3 = dt * f(t+dt , x+k2*2-k1) # noqa if order ==4: # noqa k3 = dt * f(t+dt/2, x+k2/2) # noqa k4 = dt * f(t+dt , x+k3) # noqa if order ==1: return x + k1 # noqa elif order ==2: return x + k2 # noqa elif order ==3: return x + (k1 + 4*k2 + k3)/6 # noqa elif order ==4: return x + (k1 + 2*(k2 + k3) + k4)/6 # noqa else: raise NotImplementedError # noqa # fmt: on
def rk4(f, x, t, dt, order=4): """Runge-Kutta (explicit, non-adaptive) numerical ODE solvers. Parameters ---------- f : function The forcing of the ODE must a function of the form f(t, x) x : ndarray or float State vector of the forcing term t : float Starting time of the integration dt : float Integration time step. order : int, optional The order of RK method. Default: 4 Returns ------- ndarray State vector at the new time step t+dt """ if order >=1: k1 = dt * f(t , x) # noqa if order >=2: k2 = dt * f(t+dt/2, x+k1/2) # noqa if order ==3: k3 = dt * f(t+dt , x+k2*2-k1) # noqa if order ==4: # noqa k3 = dt * f(t+dt/2, x+k2/2) # noqa k4 = dt * f(t+dt , x+k3) # noqa if order ==1: return x + k1 # noqa elif order ==2: return x + k2 # noqa elif order ==3: return x + (k1 + 4*k2 + k3)/6 # noqa elif order ==4: return x + (k1 + 2*(k2 + k3) + k4)/6 # noqa else: raise NotImplementedError # noqa # fmt: on
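A short usage sketch for the rk4 shown above: integrate dx/dt = -x over one time unit with 100 steps and compare with the exact solution exp(-1). It assumes only the rk4 definition from this row and the standard library.

import math

# Integrate dx/dt = -x from t=0 to t=1 with classic RK4 steps.
f = lambda t, x: -x
x, t, dt = 1.0, 0.0, 0.01
for _ in range(100):
    x = rk4(f, x, t, dt, order=4)
    t += dt
print(x, math.exp(-1.0))  # both approximately 0.3678794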
30,577
def http_request(method, endpoint, params=None, token=False): """ make api call """ if not token: return_error("No authorization token provided") head = make_headers(endpoint, token) url = make_url(endpoint) demisto.info("Making request to {} with params: {}".format(url, params)) r = requests.request( method, url, params=params, headers=head, verify=VERIFY_CERTIFICATES ) if r.status_code != 200: demisto.error(r.text) return_error('Error in API call [%d] - %s' % (r.status_code, r.reason)) try: res_json = r.json() return res_json except json.decoder.JSONDecodeError as err: raise ValueError('Failed to parse response as JSON. Original response:\n{rtext}.\nError: {error}' .format(rtext=r.text, error=str(err)))
def http_request(method, endpoint, params=None, token=False): """ make api call """ if not token: return_error("No authorization token provided") head = make_headers(endpoint, token) url = make_url(endpoint) demisto.debug("Making request to {} with params: {}".format(url, params)) r = requests.request( method, url, params=params, headers=head, verify=VERIFY_CERTIFICATES ) if r.status_code != 200: demisto.error(r.text) return_error('Error in API call [%d] - %s' % (r.status_code, r.reason)) try: res_json = r.json() return res_json except json.decoder.JSONDecodeError as err: raise ValueError('Failed to parse response as JSON. Original response:\n{rtext}.\nError: {error}' .format(rtext=r.text, error=str(err)))
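The only change in this pair is demoting the request log line from demisto.info to demisto.debug, so the URL and query parameters are not written at the default verbosity. The same idea with the standard logging module, as a platform-neutral sketch (logger name and URL are illustrative):

import logging

# At the default INFO level the parameter dump stays hidden; it only appears
# when debug logging is switched on explicitly.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("integration")
url, params = "https://api.example.com/v1/items", {"limit": 10}
log.debug("Making request to %s with params: %s", url, params)  # suppressed
log.info("Request to %s finished", url)                          # emitted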
30,428
def build_misp_complex_filter(demisto_query: str): """ Args: demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:' using ',' as delimiter for parameters and ';' as delimiter for operators. using the operators is optional. if 'demisto_query' does not contains any of the complex operators the original input will be returned Returns: dict: dictionary created for misp to perform complex auery or if no complex qury found retruns the original input Example: demisto_query should look like: example 1: "AND:param1,param2;OR:param3;NOT:param4,param5" example 2: "NOT:param3,param5" example 3 (simple syntax): "param1,param2" """ regexAnd = r"(AND:)([^\;]+)(;)" regexOr = r"(OR:)([^\;]+)(;)" regexNot = r"(NOT:)([^\;]+)(;)" andList = None orList = None notList = None isComplexSearch = False matchAnd = re.search(regexAnd, demisto_query, re.MULTILINE) matchOr = re.search(regexOr, demisto_query, re.MULTILINE) matchNot = re.search(regexNot, demisto_query, re.MULTILINE) if matchAnd is not None: andList = matchAnd.group(2).split(',') isComplexSearch = True if matchOr is not None: orList = matchOr.group(2).split(',') isComplexSearch = True if matchNot is not None: notList = matchNot.group(2).split(',') isComplexSearch = True if isComplexSearch: misp_complex_query = MISP.build_complex_query( or_parameters = orList, and_parameters = andList, not_parameters = notList) return misp_complex_query return demisto_query
def build_misp_complex_filter(demisto_query: str): """ Args: demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:' using ',' as delimiter for parameters and ';' as delimiter for operators. using the operators is optional. if 'demisto_query' does not contains any of the complex operators the original input will be returned Returns: dict: dictionary created for misp to perform complex auery or if no complex qury found retruns the original input Example: demisto_query should look like: example 1: "AND:param1,param2;OR:param3;NOT:param4,param5" example 2: "NOT:param3,param5" example 3 (simple syntax): "param1,param2" """ regexAnd = r"(AND:)([^\;]+)(;)" regexOr = r'(OR:)([^\;]+)(;)' regexNot = r"(NOT:)([^\;]+)(;)" andList = None orList = None notList = None isComplexSearch = False matchAnd = re.search(regexAnd, demisto_query, re.MULTILINE) matchOr = re.search(regexOr, demisto_query, re.MULTILINE) matchNot = re.search(regexNot, demisto_query, re.MULTILINE) if matchAnd is not None: andList = matchAnd.group(2).split(',') isComplexSearch = True if matchOr is not None: orList = matchOr.group(2).split(',') isComplexSearch = True if matchNot is not None: notList = matchNot.group(2).split(',') isComplexSearch = True if isComplexSearch: misp_complex_query = MISP.build_complex_query( or_parameters = orList, and_parameters = andList, not_parameters = notList) return misp_complex_query return demisto_query
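A quick standalone check of the three regexes above against the docstring's first example, using only the standard re module. Note that each pattern requires a trailing ';' after its parameter list, so the final NOT group in example 1 does not match as written.

import re

query = "AND:param1,param2;OR:param3;NOT:param4,param5"
patterns = {
    "AND": r"(AND:)([^\;]+)(;)",
    "OR": r"(OR:)([^\;]+)(;)",
    "NOT": r"(NOT:)([^\;]+)(;)",
}
for label, pattern in patterns.items():
    m = re.search(pattern, query, re.MULTILINE)
    print(label, m.group(2).split(",") if m else None)
# AND ['param1', 'param2']
# OR ['param3']
# NOT None  (no trailing ';' after param5)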
34,786
def _core_config_to_predict_graph_schema( config: Dict[Text, Any] ) -> Tuple[Dict[Text, Any], List[Text]]: core_predict_graph = {} policies = deepcopy(config["policies"]) policy_names = [] e2e = False for i, policy in enumerate(policies): policy_name = policy.pop("name") unique_policy_name = f"{policy_name}_{i}" policy_names.append(unique_policy_name) policy_class = rasa.core.registry.policy_from_module_path(policy_name) policy_step = { unique_policy_name: { "uses": policy_class, "constructor_name": "load", "fn": "predict_action_probabilities", "config": {"resource_name": unique_policy_name, **policy}, "needs": {"tracker": "add_parsed_nlu_message", "domain": "load_domain"}, }, } if ( "e2e_features" in inspect.signature(policy_class.predict_action_probabilities).parameters ): policy_step[unique_policy_name]["needs"][ "e2e_features" ] = "create_e2e_lookup" e2e = True core_predict_graph.update(policy_step) if e2e: nlu_e2e_predict_graph_schema, nlu_e2e_out = _nlu_config_to_predict_graph_schema( config, input_task="convert_tracker_for_e2e", classify=False, component_namespace="core", ) e2e_part = { "convert_tracker_for_e2e": { "uses": StoryToTrainingDataConverter, "fn": "convert_for_inference", "config": {}, "needs": {"tracker": "add_parsed_nlu_message",}, "persistor": False, }, "create_e2e_lookup": { "uses": MessageToE2EFeatureConverter, "fn": "convert", "config": {}, "needs": {"messages": nlu_e2e_out}, "persistor": False, }, **nlu_e2e_predict_graph_schema, } core_predict_graph.update(e2e_part) return core_predict_graph, policy_names
def _core_config_to_predict_graph_schema( config: Dict[Text, Any] ) -> Tuple[Dict[Text, Any], List[Text]]: core_predict_graph = {} policies = deepcopy(config["policies"]) policy_names = [] e2e = False for i, policy in enumerate(policies): policy_name = policy.pop("name") unique_policy_name = f"{policy_name}_{i}" policy_names.append(unique_policy_name) policy_class = rasa.core.registry.policy_from_module_path(policy_name) policy_step = { unique_policy_name: { "uses": policy_class, "constructor_name": "load", "fn": "predict_action_probabilities", "config": {"resource_name": unique_policy_name, **policy}, "needs": {"tracker": "add_parsed_nlu_message", "domain": "load_domain"}, }, } if ( "e2e_features" in inspect.signature(policy_class.predict_action_probabilities).parameters ): policy_step[unique_policy_name]["needs"][ "e2e_features" ] = "create_e2e_lookup" e2e = True core_predict_graph.update(policy_step) if e2e: nlu_e2e_predict_graph_schema, nlu_e2e_out = _nlu_config_to_predict_graph_schema( config, input_task="convert_tracker_for_e2e", classify=False, component_namespace="core", ) e2e_part = { "convert_tracker_for_e2e": { "uses": StoryToTrainingDataConverter, "fn": "convert_for_inference", "config": {}, "needs": {"tracker": "add_parsed_nlu_message"}, "persistor": False, }, "create_e2e_lookup": { "uses": MessageToE2EFeatureConverter, "fn": "convert", "config": {}, "needs": {"messages": nlu_e2e_out}, "persistor": False, }, **nlu_e2e_predict_graph_schema, } core_predict_graph.update(e2e_part) return core_predict_graph, policy_names
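The pair differs only by a trailing comma inside one "needs" dict; the interesting logic is the signature probe that decides whether a policy step needs e2e features. A self-contained sketch of that check with hypothetical stand-in policy classes, not Rasa's real ones:

import inspect

class PolicyWithoutE2E:  # hypothetical
    def predict_action_probabilities(self, tracker, domain):
        ...

class PolicyWithE2E:  # hypothetical
    def predict_action_probabilities(self, tracker, domain, e2e_features=None):
        ...

for cls in (PolicyWithoutE2E, PolicyWithE2E):
    needs_e2e = (
        "e2e_features"
        in inspect.signature(cls.predict_action_probabilities).parameters
    )
    print(cls.__name__, needs_e2e)  # False, then True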
44,112
def expval_hessian_param_shift( tape, argnum, diff_methods, diagonal_shifts, off_diagonal_shifts, f0 ): r"""Generate the Hessian tapes that are used in the computation of the second derivative of a quantum tape, using analytical parameter-shift rules to do so exactly. Also define a post-processing function to combine the results of evaluating the Hessian tapes. Args: tape (pennylane.QNode or .QuantumTape): quantum tape or QNode to differentiate argnum (int or list[int] or None): Parameter indices to differentiate with respect to. If not provided, the Hessian with respect to all trainable indices is returned. Note that the indices refer to tape parameters if ``tape`` is a tape, and to QNode arguments if it is a QNode. diff_methods (list[string]): The differentiation method to use for each trainable parameter. Can be "A" or "0", where "A" is the analytical parameter shift rule and "0" indicates a 0 derivative (that is the parameter does not affect the tape's output). diagonal_shifts (list[tuple[int or float]]): List containing tuples of shift values for the Hessian diagonal. If provided, one tuple of shifts should be given per trainable parameter and the tuple length should match the number of frequencies for that parameter. If unspecified, equidistant shifts are used. off_diagonal_shifts (list[tuple[int or float]]): List containing tuples of shift values for the off-diagonal entries of the Hessian. If provided, one tuple of shifts should be given per trainable parameter and the tuple should match the number of frequencies for that parameter. The combination of shifts into bivariate shifts is performed automatically. If unspecified, equidistant shifts are used. f0 (tensor_like[float] or None): Output of the evaluated input tape. If provided, and the Hessian tapes include the original input tape, the 'f0' value is used instead of evaluating the input tape, reducing the number of device invocations. Returns: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the results of the evaluated tapes. """ # pylint: disable=too-many-arguments, too-many-statements argnum = tape.trainable_params if argnum is None else argnum h_dim = tape.num_params unshifted_coeffs = {} # Marks whether we will need to add the unshifted tape to all Hessian tapes. add_unshifted = f0 is None # Assemble all univariate recipes for the diagonal and as partial components for the # off-diagonal entries. diag_recipes, partial_offdiag_recipes = _collect_recipes( tape, argnum, diff_methods, diagonal_shifts, off_diagonal_shifts ) hessian_tapes = [] hessian_coeffs = [] for i in range(h_dim): # The diagonal recipe is None if the parameter is not trainable or not in argnum if diag_recipes[i] is None: hessian_coeffs.extend([None] * (h_dim - i)) continue # Obtain the recipe for the diagonal. dc_i, dm_i, ds_i = diag_recipes[i] # Add the unshifted tape if it is required for this diagonal, it has not been # added yet, and it is required because f0 was not provided. 
if ds_i[0] == 0 and dm_i[0] == 1.0: if add_unshifted: hessian_tapes.insert(0, tape) add_unshifted = False unshifted_coeffs[(i, i)] = dc_i[0] dc_i, dm_i, ds_i = dc_i[1:], dm_i[1:], ds_i[1:] # Create the shifted tapes for the diagonal entry and store them along with coefficients diag_tapes = generate_shifted_tapes(tape, i, ds_i, dm_i) hessian_tapes.extend(diag_tapes) hessian_coeffs.append(dc_i) recipe_i = partial_offdiag_recipes[i] for j in range(i + 1, h_dim): recipe_j = partial_offdiag_recipes[j] # Create tapes and coefficients for the off-diagonal entry by combining # the two univariate first-order derivative recipes. off_diag_data = _generate_off_diag_tapes(tape, (i, j), recipe_i, recipe_j) hessian_tapes.extend(off_diag_data[0]) hessian_coeffs.append(off_diag_data[1]) # It should not be possible to obtain an unshifted tape for the off-diagonal # terms if there hasn't already been one for the diagonal terms. # TODO: This will depend on the decision on how diagonal_shifts are formatted. # TODO: If this is confirmed, remove the following safety check if off_diag_data[2] is not None: raise ValueError( "A tape without parameter shifts was created unexpectedly during " "the computation of the Hessian. Please submit a bug report " "at https://github.com/PennyLaneAI/pennylane/issues" ) # pragma: no cover def processing_fn(results): # Apply the same squeezing as in qml.QNode to make the transform output consistent. # pylint: disable=protected-access if tape._qfunc_output is not None and not isinstance(tape._qfunc_output, Sequence): results = qml.math.squeeze(qml.math.stack(results)) # The first results dimension is the number of terms/tapes in the parameter-shift # rule, the remaining ones are the QNode output dimensions. out_dim = qml.math.shape(results)[1:] # The desired shape of the Hessian is: # (QNode output dimensions, # trainable gate args, # trainable gate args), # but first we accumulate all elements into a list, since no array assignment is possible. hessian = [] # Keep track of tape results already consumed. start = int(bool(unshifted_coeffs) and f0 is None) # Results of the unshifted tape. r0 = results[0] if start == 1 else f0 for i, j in it.product(range(h_dim), repeat=2): if j < i: hessian.append(hessian[j * h_dim + i]) continue k = i * h_dim + j - i * (i + 1) // 2 coeffs = hessian_coeffs[k] if coeffs is None or len(coeffs) == 0: hessian.append(qml.math.zeros(out_dim)) continue res = results[start : start + len(coeffs)] start = start + len(coeffs) res = qml.math.stack(res) coeffs = qml.math.cast(qml.math.convert_like(coeffs, res), res.dtype) hess = qml.math.tensordot(res, coeffs, [[0], [0]]) if (i, j) in unshifted_coeffs: hess = hess + unshifted_coeffs[(i, j)] * r0 hessian.append(hess) # Reshape the Hessian to have the QNode output dimensions on the outside, that is: # (h_dim*h_dim, *out_dims) -> (h_dim, h_dim, *out_dims) -> (*out_dims, h_dim, h_dim) # Remember: h_dim = num_gate_args hessian = qml.math.reshape(qml.math.stack(hessian), (h_dim, h_dim) + out_dim) reordered_axes = list(range(2, len(out_dim) + 2)) + [0, 1] return qml.math.transpose(hessian, axes=reordered_axes) return hessian_tapes, processing_fn
def expval_hessian_param_shift( tape, argnum, diff_methods, diagonal_shifts, off_diagonal_shifts, f0 ): r"""Generate the Hessian tapes that are used in the computation of the second derivative of a quantum tape, using analytical parameter-shift rules to do so exactly. Also define a post-processing function to combine the results of evaluating the Hessian tapes. Args: tape (.QuantumTape): quantum tape to differentiate argnum (int or list[int] or None): Parameter indices to differentiate with respect to. If not provided, the Hessian with respect to all trainable indices is returned. Note that the indices refer to tape parameters if ``tape`` is a tape, and to QNode arguments if it is a QNode. diff_methods (list[string]): The differentiation method to use for each trainable parameter. Can be "A" or "0", where "A" is the analytical parameter shift rule and "0" indicates a 0 derivative (that is the parameter does not affect the tape's output). diagonal_shifts (list[tuple[int or float]]): List containing tuples of shift values for the Hessian diagonal. If provided, one tuple of shifts should be given per trainable parameter and the tuple length should match the number of frequencies for that parameter. If unspecified, equidistant shifts are used. off_diagonal_shifts (list[tuple[int or float]]): List containing tuples of shift values for the off-diagonal entries of the Hessian. If provided, one tuple of shifts should be given per trainable parameter and the tuple should match the number of frequencies for that parameter. The combination of shifts into bivariate shifts is performed automatically. If unspecified, equidistant shifts are used. f0 (tensor_like[float] or None): Output of the evaluated input tape. If provided, and the Hessian tapes include the original input tape, the 'f0' value is used instead of evaluating the input tape, reducing the number of device invocations. Returns: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the results of the evaluated tapes. """ # pylint: disable=too-many-arguments, too-many-statements argnum = tape.trainable_params if argnum is None else argnum h_dim = tape.num_params unshifted_coeffs = {} # Marks whether we will need to add the unshifted tape to all Hessian tapes. add_unshifted = f0 is None # Assemble all univariate recipes for the diagonal and as partial components for the # off-diagonal entries. diag_recipes, partial_offdiag_recipes = _collect_recipes( tape, argnum, diff_methods, diagonal_shifts, off_diagonal_shifts ) hessian_tapes = [] hessian_coeffs = [] for i in range(h_dim): # The diagonal recipe is None if the parameter is not trainable or not in argnum if diag_recipes[i] is None: hessian_coeffs.extend([None] * (h_dim - i)) continue # Obtain the recipe for the diagonal. dc_i, dm_i, ds_i = diag_recipes[i] # Add the unshifted tape if it is required for this diagonal, it has not been # added yet, and it is required because f0 was not provided. 
if ds_i[0] == 0 and dm_i[0] == 1.0: if add_unshifted: hessian_tapes.insert(0, tape) add_unshifted = False unshifted_coeffs[(i, i)] = dc_i[0] dc_i, dm_i, ds_i = dc_i[1:], dm_i[1:], ds_i[1:] # Create the shifted tapes for the diagonal entry and store them along with coefficients diag_tapes = generate_shifted_tapes(tape, i, ds_i, dm_i) hessian_tapes.extend(diag_tapes) hessian_coeffs.append(dc_i) recipe_i = partial_offdiag_recipes[i] for j in range(i + 1, h_dim): recipe_j = partial_offdiag_recipes[j] # Create tapes and coefficients for the off-diagonal entry by combining # the two univariate first-order derivative recipes. off_diag_data = _generate_off_diag_tapes(tape, (i, j), recipe_i, recipe_j) hessian_tapes.extend(off_diag_data[0]) hessian_coeffs.append(off_diag_data[1]) # It should not be possible to obtain an unshifted tape for the off-diagonal # terms if there hasn't already been one for the diagonal terms. # TODO: This will depend on the decision on how diagonal_shifts are formatted. # TODO: If this is confirmed, remove the following safety check if off_diag_data[2] is not None: raise ValueError( "A tape without parameter shifts was created unexpectedly during " "the computation of the Hessian. Please submit a bug report " "at https://github.com/PennyLaneAI/pennylane/issues" ) # pragma: no cover def processing_fn(results): # Apply the same squeezing as in qml.QNode to make the transform output consistent. # pylint: disable=protected-access if tape._qfunc_output is not None and not isinstance(tape._qfunc_output, Sequence): results = qml.math.squeeze(qml.math.stack(results)) # The first results dimension is the number of terms/tapes in the parameter-shift # rule, the remaining ones are the QNode output dimensions. out_dim = qml.math.shape(results)[1:] # The desired shape of the Hessian is: # (QNode output dimensions, # trainable gate args, # trainable gate args), # but first we accumulate all elements into a list, since no array assignment is possible. hessian = [] # Keep track of tape results already consumed. start = int(bool(unshifted_coeffs) and f0 is None) # Results of the unshifted tape. r0 = results[0] if start == 1 else f0 for i, j in it.product(range(h_dim), repeat=2): if j < i: hessian.append(hessian[j * h_dim + i]) continue k = i * h_dim + j - i * (i + 1) // 2 coeffs = hessian_coeffs[k] if coeffs is None or len(coeffs) == 0: hessian.append(qml.math.zeros(out_dim)) continue res = results[start : start + len(coeffs)] start = start + len(coeffs) res = qml.math.stack(res) coeffs = qml.math.cast(qml.math.convert_like(coeffs, res), res.dtype) hess = qml.math.tensordot(res, coeffs, [[0], [0]]) if (i, j) in unshifted_coeffs: hess = hess + unshifted_coeffs[(i, j)] * r0 hessian.append(hess) # Reshape the Hessian to have the QNode output dimensions on the outside, that is: # (h_dim*h_dim, *out_dims) -> (h_dim, h_dim, *out_dims) -> (*out_dims, h_dim, h_dim) # Remember: h_dim = num_gate_args hessian = qml.math.reshape(qml.math.stack(hessian), (h_dim, h_dim) + out_dim) reordered_axes = list(range(2, len(out_dim) + 2)) + [0, 1] return qml.math.transpose(hessian, axes=reordered_axes) return hessian_tapes, processing_fn
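The post-processing function indexes the upper triangle of the Hessian with k = i*h_dim + j - i*(i+1)//2 and mirrors the lower triangle from entries already appended. A tiny pure-Python check, no PennyLane required, that this formula enumerates the upper-triangular pairs consecutively:

from itertools import product

h_dim = 4
ks = [
    i * h_dim + j - i * (i + 1) // 2
    for i, j in product(range(h_dim), repeat=2)
    if j >= i
]
print(ks)  # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert ks == list(range(h_dim * (h_dim + 1) // 2))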
6,325
def fromEpoch(epoch): """ Get datetime object from epoch """ return datetime.datetime.utcnow().utcfromtimestamp(epoch)
def fromEpoch(epoch): """ Get datetime object from epoch """ return datetime.datetime.utcfromtimestamp(epoch)
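The edit drops the redundant utcnow() call: utcfromtimestamp is a classmethod, so calling it on a throwaway utcnow() instance returned the same value as calling it on the class. A quick standard-library check:

import datetime

epoch = 1_600_000_000
a = datetime.datetime.utcnow().utcfromtimestamp(epoch)  # original spelling
b = datetime.datetime.utcfromtimestamp(epoch)           # modified spelling
print(a == b, b)  # True 2020-09-13 12:26:40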
2,231
def fastica( X, n_components=None, *, algorithm="parallel", whiten=None, fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None, random_state=None, return_X_mean=False, compute_sources=True, return_n_iter=False, ): """Perform Fast Independent Component Analysis. The implementation is based on [1]_. Read more in the :ref:`User Guide <ICA>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. n_components : int, default=None Number of components to extract. If None no dimension reduction is performed. algorithm : {'parallel', 'deflation'}, default='parallel' Apply a parallel or deflational FASTICA algorithm. whiten : str or bool, default=None Specify the whitening strategy to use. If 'arbitrary-variance', a whitening with variance arbitrary is used. If 'unit-variance', the whitening variance is adjusted to be unitary. If False, the data is already considered to be whitened, and no whitening is performed. If None (default), 'arbitrary-variance' is used. .. deprecated:: 1.1 From version 1.3 whiten='unit-variance' will be used by default. `whiten=True` is deprecated from 1.1 and will be removed in 1.3. Use `whiten=arbitrary-variance` instead. fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' The functional form of the G function used in the approximation to neg-entropy. Could be either 'logcosh', 'exp', or 'cube'. You can also provide your own function. It should return a tuple containing the value of the function, and of its derivative, in the point. The derivative should be averaged along its last dimension. Example: def my_g(x): return x ** 3, np.mean(3 * x ** 2, axis=-1) fun_args : dict, default=None Arguments to send to the functional form. If empty or None and if fun='logcosh', fun_args will take value {'alpha' : 1.0} max_iter : int, default=200 Maximum number of iterations to perform. tol : float, default=1e-04 A positive scalar giving the tolerance at which the un-mixing matrix is considered to have converged. w_init : ndarray of shape (n_components, n_components), default=None Initial un-mixing array of dimension (n.comp,n.comp). If None (default) then an array of normal r.v.'s is used. random_state : int, RandomState instance or None, default=None Used to initialize ``w_init`` when not specified, with a normal distribution. Pass an int, for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. return_X_mean : bool, default=False If True, X_mean is returned too. compute_sources : bool, default=True If False, sources are not computed, but only the rotation matrix. This can save memory when working with big data. Defaults to True. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- K : ndarray of shape (n_components, n_features) or None If whiten is 'True', K is the pre-whitening matrix that projects data onto the first n_components principal components. If whiten is 'False', K is 'None'. W : ndarray of shape (n_components, n_components) The square matrix that unmixes the data after whitening. The mixing matrix is the pseudo-inverse of matrix ``W K`` if K is not None, else it is the inverse of W. S : ndarray of shape (n_samples, n_components) or None Estimated source matrix X_mean : ndarray of shape (n_features,) The mean over features. Returned only if return_X_mean is True. 
n_iter : int If the algorithm is "deflation", n_iter is the maximum number of iterations run across all components. Else they are just the number of iterations taken to converge. This is returned only when return_n_iter is set to `True`. Notes ----- The data matrix X is considered to be a linear combination of non-Gaussian (independent) components i.e. X = AS where columns of S contain the independent components and A is a linear mixing matrix. In short ICA attempts to `un-mix' the data by estimating an un-mixing matrix W where ``S = W K X.`` While FastICA was proposed to estimate as many sources as features, it is possible to estimate less by setting n_components < n_features. It this case K is not a square matrix and the estimated A is the pseudo-inverse of ``W K``. This implementation was originally made for data of shape [n_features, n_samples]. Now the input is transposed before the algorithm is applied. This makes it slightly faster for Fortran-ordered input. References ---------- .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis", Algorithms and Applications, Neural Networks, 13(4-5), 2000, pp. 411-430. """ est = FastICA( n_components=n_components, algorithm=algorithm, whiten=whiten, fun=fun, fun_args=fun_args, max_iter=max_iter, tol=tol, w_init=w_init, random_state=random_state, ) S = est._fit(X, compute_sources=compute_sources) if est.whiten_ in ["unitary-variance", "arbitrary-variance"]: K = est.whitening_ X_mean = est.mean_ else: K = None X_mean = None returned_values = [K, est._unmixing, S] if return_X_mean: returned_values.append(X_mean) if return_n_iter: returned_values.append(est.n_iter_) return returned_values
def fastica( X, n_components=None, *, algorithm="parallel", whiten=None, fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None, random_state=None, return_X_mean=False, compute_sources=True, return_n_iter=False, ): """Perform Fast Independent Component Analysis. The implementation is based on [1]_. Read more in the :ref:`User Guide <ICA>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. n_components : int, default=None Number of components to extract. If None no dimension reduction is performed. algorithm : {'parallel', 'deflation'}, default='parallel' Apply a parallel or deflational FASTICA algorithm. whiten : str or bool, default="warn" Specify the whitening strategy to use. If 'arbitrary-variance', a whitening with variance arbitrary is used. If 'unit-variance', the whitening variance is adjusted to be unitary. If False, the data is already considered to be whitened, and no whitening is performed. If None (default), 'arbitrary-variance' is used. .. deprecated:: 1.1 From version 1.3 whiten='unit-variance' will be used by default. `whiten=True` is deprecated from 1.1 and will be removed in 1.3. Use `whiten=arbitrary-variance` instead. fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' The functional form of the G function used in the approximation to neg-entropy. Could be either 'logcosh', 'exp', or 'cube'. You can also provide your own function. It should return a tuple containing the value of the function, and of its derivative, in the point. The derivative should be averaged along its last dimension. Example: def my_g(x): return x ** 3, np.mean(3 * x ** 2, axis=-1) fun_args : dict, default=None Arguments to send to the functional form. If empty or None and if fun='logcosh', fun_args will take value {'alpha' : 1.0} max_iter : int, default=200 Maximum number of iterations to perform. tol : float, default=1e-04 A positive scalar giving the tolerance at which the un-mixing matrix is considered to have converged. w_init : ndarray of shape (n_components, n_components), default=None Initial un-mixing array of dimension (n.comp,n.comp). If None (default) then an array of normal r.v.'s is used. random_state : int, RandomState instance or None, default=None Used to initialize ``w_init`` when not specified, with a normal distribution. Pass an int, for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. return_X_mean : bool, default=False If True, X_mean is returned too. compute_sources : bool, default=True If False, sources are not computed, but only the rotation matrix. This can save memory when working with big data. Defaults to True. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- K : ndarray of shape (n_components, n_features) or None If whiten is 'True', K is the pre-whitening matrix that projects data onto the first n_components principal components. If whiten is 'False', K is 'None'. W : ndarray of shape (n_components, n_components) The square matrix that unmixes the data after whitening. The mixing matrix is the pseudo-inverse of matrix ``W K`` if K is not None, else it is the inverse of W. S : ndarray of shape (n_samples, n_components) or None Estimated source matrix X_mean : ndarray of shape (n_features,) The mean over features. Returned only if return_X_mean is True. 
n_iter : int If the algorithm is "deflation", n_iter is the maximum number of iterations run across all components. Else they are just the number of iterations taken to converge. This is returned only when return_n_iter is set to `True`. Notes ----- The data matrix X is considered to be a linear combination of non-Gaussian (independent) components i.e. X = AS where columns of S contain the independent components and A is a linear mixing matrix. In short ICA attempts to `un-mix' the data by estimating an un-mixing matrix W where ``S = W K X.`` While FastICA was proposed to estimate as many sources as features, it is possible to estimate less by setting n_components < n_features. It this case K is not a square matrix and the estimated A is the pseudo-inverse of ``W K``. This implementation was originally made for data of shape [n_features, n_samples]. Now the input is transposed before the algorithm is applied. This makes it slightly faster for Fortran-ordered input. References ---------- .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis", Algorithms and Applications, Neural Networks, 13(4-5), 2000, pp. 411-430. """ est = FastICA( n_components=n_components, algorithm=algorithm, whiten=whiten, fun=fun, fun_args=fun_args, max_iter=max_iter, tol=tol, w_init=w_init, random_state=random_state, ) S = est._fit(X, compute_sources=compute_sources) if est.whiten_ in ["unitary-variance", "arbitrary-variance"]: K = est.whitening_ X_mean = est.mean_ else: K = None X_mean = None returned_values = [K, est._unmixing, S] if return_X_mean: returned_values.append(X_mean) if return_n_iter: returned_values.append(est.n_iter_) return returned_values
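A hedged usage sketch of the function-style fastica API documented above, on a toy two-source mixture; it assumes scikit-learn is installed, and the accepted whiten values and their default are version-dependent per the deprecation note in the docstring.

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
S = np.c_[np.sin(np.linspace(0, 8, 2000)), rng.laplace(size=2000)]  # two sources
A = np.array([[1.0, 0.5], [0.5, 1.0]])                              # mixing matrix
X = S @ A.T
K, W, S_est = fastica(X, n_components=2, whiten="unit-variance", random_state=0)
print(K.shape, W.shape, S_est.shape)  # (2, 2) (2, 2) (2000, 2)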
39,039
def _load_pipeline_bootstrap_context() -> Dict: bootstrap_command_names = ["pipeline", "bootstrap"] section = "parameters" context: Dict = {} config = SamConfig(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME) if not config.exists(): context[str(["environment_names_message"])] = "" return context # config.get_env_names() will return the list of # bootstrapped env names and "default" which is used to store shared values # we don't want to include "default" here. env_names = [env_name for env_name in config.get_env_names() if env_name != "default"] for env in env_names: for key, value in config.get_all(bootstrap_command_names, section, env).items(): context[str([env, key])] = value # pre-load the list of env names detected from pipelineconfig.toml environment_names_message = ( "Here are the environment names detected " + f"in {os.path.join(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME)}:\n" + "\n".join([f"- {env_name}" for env_name in env_names]) ) context[str(["environment_names_message"])] = environment_names_message return context
def _load_pipeline_bootstrap_context() -> Dict: bootstrap_command_names = ["pipeline", "bootstrap"] section = "parameters" context: Dict = {} config = SamConfig(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME) if not config.exists(): context[str(["environment_names_message"])] = "" return context # config.get_env_names() will return the list of # bootstrapped env names and "default" which is used to store shared values # we don't want to include "default" here. env_names = [env_name for env_name in config.get_env_names() if env_name != "default"] for env in env_names: for key, value in config.get_all(bootstrap_command_names, section, env).items(): context[str([env, key])] = value # pre-load the list of env names detected from pipelineconfig.toml environment_names_message = ( "Here are the environment names detected " + f"in {os.path.join(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME)}:\n" + "\n".join([f"\t- {env_name}" for env_name in env_names]) ) context[str(["environment_names_message"])] = environment_names_message return context
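The two versions differ only in the indentation of the bullet list inside environment_names_message. Worth noting how the context keys are built: they are str() of a Python list, which yields keys like "['prod', 'stage_name']". A tiny illustration with hypothetical env and key names:

env, key, value = "prod", "stage_name", "Prod"  # hypothetical values
context = {str([env, key]): value}
print(context)                              # {"['prod', 'stage_name']": 'Prod'}
print(str(["environment_names_message"]))   # ['environment_names_message']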
2,823
def additive_chi2_kernel(X, Y=None): """Compute the additive chi-squared kernel between observations in X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Input array/matrix X. Y : ndarray of shape (n_samples_Y, n_features), default=None If `None`, uses `Y=X`. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) Returns the additive chi-squared kernel between observations in X and Y. See Also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 https://hal.archives-ouvertes.fr/hal-00171412/document """ if issparse(X) or issparse(Y): raise ValueError("additive_chi2 does not support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if (X < 0).any(): raise ValueError("X contains negative values.") if Y is not X and (Y < 0).any(): raise ValueError("Y contains negative values.") result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result
def additive_chi2_kernel(X, Y=None): """Compute the additive chi-squared kernel between observations in X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) A feature array. Y : ndarray of shape (n_samples_Y, n_features), default=None If `None`, uses `Y=X`. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) Returns the additive chi-squared kernel between observations in X and Y. See Also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 https://hal.archives-ouvertes.fr/hal-00171412/document """ if issparse(X) or issparse(Y): raise ValueError("additive_chi2 does not support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if (X < 0).any(): raise ValueError("X contains negative values.") if Y is not X and (Y < 0).any(): raise ValueError("Y contains negative values.") result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result
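For checking small dense inputs against the Cython fast path used above, a naive NumPy reference of the docstring formula k(x, y) = -Sum[(x - y)^2 / (x + y)]. Zero denominators contribute zero here, the usual convention for this kernel, but this sketch is not the scikit-learn implementation itself.

import numpy as np

def additive_chi2_reference(X, Y):
    # Naive O(n_x * n_y * n_features) loop over the docstring formula.
    X, Y = np.asarray(X, dtype=float), np.asarray(Y, dtype=float)
    K = np.zeros((X.shape[0], Y.shape[0]))
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            num, den = (x - y) ** 2, x + y
            safe_den = np.where(den > 0, den, 1.0)          # avoid divide-by-zero
            K[i, j] = -np.where(den > 0, num / safe_den, 0.0).sum()
    return K

X = [[1.0, 2.0, 0.0], [0.5, 0.5, 1.0]]
print(additive_chi2_reference(X, X))  # symmetric, zeros on the diagonal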
5,421
def test__kms(): # pylint: disable=no-self-use """ _kms calls boto3.Session.client with 'kms' as its only argument. """ with patch("boto3.Session.client") as client: aws_kms._kms() client.assert_called_with("kms")
def test__kms(): """ _kms calls boto3.Session.client with 'kms' as its only argument. """ with patch("boto3.Session.client") as client: aws_kms._kms() client.assert_called_with("kms")
20,135
def register_opts(ignore_errors=False): rbac_opts = [ cfg.BoolOpt("enable", default=False, help="Enable RBAC."), cfg.StrOpt("backend", default="noop", help="RBAC backend to use."), cfg.BoolOpt( "sync_remote_groups", default=False, help="True to synchronize remote groups returned by the auth backed for each " "StackStorm user with local StackStorm roles based on the group to role " "mapping definition files.", ), cfg.BoolOpt( "permission_isolation", default=False, help="Isolate resources by user. For now, these resources only include rules and " "executions. All resources can only be viewed or executed by the owning user " "except the admin and system_user who can view or run everything.", ), ] do_register_opts(rbac_opts, "rbac", ignore_errors) system_user_opts = [ cfg.StrOpt("user", default="stanley", help="Default system user."), cfg.StrOpt( "ssh_key_file", default="/home/stanley/.ssh/stanley_rsa", help="SSH private key for the system user.", ), ] do_register_opts(system_user_opts, "system_user", ignore_errors) schema_opts = [ cfg.IntOpt("version", default=4, help="Version of JSON schema to use."), cfg.StrOpt( "draft", default="http://json-schema.org/draft-04/schema#", help="URL to the JSON schema draft.", ), ] do_register_opts(schema_opts, "schema", ignore_errors) system_opts = [ cfg.BoolOpt("debug", default=False, help="Enable debug mode."), cfg.StrOpt( "base_path", default="/opt/stackstorm", help="Base path to all st2 artifacts.", ), cfg.BoolOpt( "validate_trigger_parameters", default=True, help="True to validate parameters for non-system trigger types when creating" "a rule. By default, only parameters for system triggers are validated.", ), cfg.BoolOpt( "validate_trigger_payload", default=True, help="True to validate payload for non-system trigger types when dispatching a trigger " "inside the sensor. By default, only payload for system triggers is validated.", ), cfg.BoolOpt( "validate_output_schema", default=False, help="True to validate action and runner output against schema.", ), ] do_register_opts(system_opts, "system", ignore_errors) system_packs_base_path = os.path.join(cfg.CONF.system.base_path, "packs") system_runners_base_path = os.path.join(cfg.CONF.system.base_path, "runners") content_opts = [ cfg.StrOpt( "pack_group", default="st2packs", help="User group that can write to packs directory.", ), cfg.StrOpt( "system_packs_base_path", default=system_packs_base_path, help="Path to the directory which contains system packs.", ), cfg.StrOpt( "system_runners_base_path", default=system_runners_base_path, help="Path to the directory which contains system runners. " "NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0", ), cfg.StrOpt( "packs_base_paths", default=None, help="Paths which will be searched for integration packs.", ), cfg.StrOpt( "runners_base_paths", default=None, help="Paths which will be searched for runners. " "NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0", ), cfg.ListOpt( "index_url", default=["https://index.stackstorm.org/v1/index.json"], help="A URL pointing to the pack index. StackStorm Exchange is used by " "default. Use a comma-separated list for multiple indexes if you " 'want to get other packs discovered with "st2 pack search".', ), ] do_register_opts(content_opts, "content", ignore_errors) webui_opts = [ cfg.StrOpt( "webui_base_url", default="https://%s" % socket.getfqdn(), help="Base https URL to access st2 Web UI. 
This is used to construct history URLs " "that are sent out when chatops is used to kick off executions.", ) ] do_register_opts(webui_opts, "webui", ignore_errors) db_opts = [ cfg.StrOpt("host", default="127.0.0.1", help="host of db server"), cfg.IntOpt("port", default=27017, help="port of db server"), cfg.StrOpt("db_name", default="st2", help="name of database"), cfg.StrOpt("username", help="username for db login"), cfg.StrOpt("password", help="password for db login"), cfg.IntOpt( "connection_timeout", default=3 * 1000, help="Connection and server selection timeout (in ms).", ), cfg.IntOpt( "connection_retry_max_delay_m", default=3, help="Connection retry total time (minutes).", ), cfg.IntOpt( "connection_retry_backoff_max_s", default=10, help="Connection retry backoff max (seconds).", ), cfg.IntOpt( "connection_retry_backoff_mul", default=1, help="Backoff multiplier (seconds).", ), cfg.BoolOpt( "ssl", default=False, help="Create the connection to mongodb using SSL" ), cfg.StrOpt( "ssl_keyfile", default=None, help="Private keyfile used to identify the local connection against MongoDB.", ), cfg.StrOpt( "ssl_certfile", default=None, help="Certificate file used to identify the localconnection", ), cfg.StrOpt( "ssl_cert_reqs", default=None, choices="none, optional, required", help="Specifies whether a certificate is required from the other side of the " "connection, and whether it will be validated if provided", ), cfg.StrOpt( "ssl_ca_certs", default=None, help="ca_certs file contains a set of concatenated CA certificates, which are " "used to validate certificates passed from MongoDB.", ), cfg.BoolOpt( "ssl_match_hostname", default=True, help="If True and `ssl_cert_reqs` is not None, enables hostname verification", ), cfg.StrOpt( "authentication_mechanism", default=None, help="Specifies database authentication mechanisms. " "By default, it use SCRAM-SHA-1 with MongoDB 3.0 and later, " "MONGODB-CR (MongoDB Challenge Response protocol) for older servers.", ), cfg.StrOpt( "compressors", default="", help="Comma delimited string of compression algorithms to use for transport level " "compression. Actual algorithm will then be decided based on the algorithms " "supported by the client and the server. For example: zstd. Defaults to no " "compression. Keep in mind that zstd is only supported with MongoDB 4.2 and later.", ), cfg.IntOpt( "zlib_compression_level", default="", help="Compression level when compressors is set to zlib. Valid values are -1 to 9. " "Defaults to 6.", ), ] do_register_opts(db_opts, "database", ignore_errors) messaging_opts = [ # It would be nice to be able to deprecate url and completely switch to using # url. However, this will be a breaking change and will have impact so allowing both. cfg.StrOpt( "url", default="amqp://guest:[email protected]:5672//", help="URL of the messaging server.", ), cfg.ListOpt( "cluster_urls", default=[], help="URL of all the nodes in a messaging service cluster.", ), cfg.IntOpt( "connection_retries", default=10, help="How many times should we retry connection before failing.", ), cfg.IntOpt( "connection_retry_wait", default=10000, help="How long should we wait between connection retries.", ), cfg.BoolOpt( "ssl", default=False, help="Use SSL / TLS to connect to the messaging server. 
Same as " 'appending "?ssl=true" at the end of the connection URL string.', ), cfg.StrOpt( "ssl_keyfile", default=None, help="Private keyfile used to identify the local connection against RabbitMQ.", ), cfg.StrOpt( "ssl_certfile", default=None, help="Certificate file used to identify the local connection (client).", ), cfg.StrOpt( "ssl_cert_reqs", default=None, choices="none, optional, required", help="Specifies whether a certificate is required from the other side of the " "connection, and whether it will be validated if provided.", ), cfg.StrOpt( "ssl_ca_certs", default=None, help="ca_certs file contains a set of concatenated CA certificates, which are " "used to validate certificates passed from RabbitMQ.", ), cfg.StrOpt( "login_method", default=None, help="Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.).", ), cfg.StrOpt( "compression", default=None, choices=["zstd", "lzma", "bz2", "gzip", None], help="Compression algorithm to use for compressing the payloads which are sent over " "the message bus. Defaults to no compression.", ), ] do_register_opts(messaging_opts, "messaging", ignore_errors) syslog_opts = [ cfg.StrOpt("host", default="127.0.0.1", help="Host for the syslog server."), cfg.IntOpt("port", default=514, help="Port for the syslog server."), cfg.StrOpt("facility", default="local7", help="Syslog facility level."), cfg.StrOpt( "protocol", default="udp", help="Transport protocol to use (udp / tcp)." ), ] do_register_opts(syslog_opts, "syslog", ignore_errors) log_opts = [ cfg.ListOpt("excludes", default="", help="Exclusion list of loggers to omit."), cfg.BoolOpt( "redirect_stderr", default=False, help="Controls if stderr should be redirected to the logs.", ), cfg.BoolOpt( "mask_secrets", default=True, help="True to mask secrets in the log files." ), cfg.ListOpt( "mask_secrets_blacklist", default=[], help="Blacklist of additional attribute names to mask in the log messages.", ), ] do_register_opts(log_opts, "log", ignore_errors) # Common API options api_opts = [ cfg.StrOpt("host", default="127.0.0.1", help="StackStorm API server host"), cfg.IntOpt("port", default=9101, help="StackStorm API server port"), cfg.ListOpt( "allow_origin", default=["http://127.0.0.1:3000"], help="List of origins allowed for api, auth and stream", ), cfg.BoolOpt( "mask_secrets", default=True, help="True to mask secrets in the API responses", ), cfg.BoolOpt( "auth_cookie_secure", default=True, help='True if secure flag should be set for "auth-token" cookie which is set on ' "successful authentication via st2web. You should only set this to False if you have " "a good reason to not run and access StackStorm behind https proxy.", ), cfg.StrOpt( "auth_cookie_same_site", default="lax", choices=["strict", "lax", "none", "None"], help="SameSite attribute value for the " "auth-token cookie we set on successful authentication from st2web. If you " "don't have a specific reason (e.g. supporting old browsers) you are recommended to " "set this value to strict.", ), ] do_register_opts(api_opts, "api", ignore_errors) # Key Value store options keyvalue_opts = [ cfg.BoolOpt( "enable_encryption", default=True, help='Allow encryption of values in key value stored qualified as "secret".', ), cfg.StrOpt( "encryption_key_path", default="", help="Location of the symmetric encryption key for encrypting values in kvstore. 
" "This key should be in JSON and should've been generated using " "st2-generate-symmetric-crypto-key tool.", ), ] do_register_opts(keyvalue_opts, group="keyvalue") # Common auth options auth_opts = [ cfg.StrOpt( "api_url", default=None, help="Base URL to the API endpoint excluding the version", ), cfg.BoolOpt("enable", default=True, help="Enable authentication middleware."), cfg.IntOpt( "token_ttl", default=(24 * 60 * 60), help="Access token ttl in seconds." ), # This TTL is used for tokens which belong to StackStorm services cfg.IntOpt( "service_token_ttl", default=(24 * 60 * 60), help="Service token ttl in seconds.", ), ] do_register_opts(auth_opts, "auth", ignore_errors) # Runner options default_python_bin_path = sys.executable # If the virtualenv uses a symlinked python, then try using virtualenv from that venv # first before looking for virtualenv installed in python's system-site-packages. base_dir = os.path.dirname(default_python_bin_path) default_virtualenv_bin_path = os.path.join(base_dir, "virtualenv") if not os.path.exists(default_virtualenv_bin_path): base_dir = os.path.dirname(os.path.realpath(default_python_bin_path)) default_virtualenv_bin_path = os.path.join(base_dir, "virtualenv") action_runner_opts = [ # Common runner options cfg.StrOpt( "logging", default="/etc/st2/logging.actionrunner.conf", help="location of the logging.conf file", ), # Python runner options cfg.StrOpt( "python_binary", default=default_python_bin_path, help="Python binary which will be used by Python actions.", ), cfg.StrOpt( "virtualenv_binary", default=default_virtualenv_bin_path, help="Virtualenv binary which should be used to create pack virtualenvs.", ), cfg.StrOpt( "python_runner_log_level", default=PYTHON_RUNNER_DEFAULT_LOG_LEVEL, help="Default log level to use for Python runner actions. Can be overriden on " 'invocation basis using "log_level" runner parameter.', ), cfg.ListOpt( "virtualenv_opts", default=["--system-site-packages"], help='List of virtualenv options to be passsed to "virtualenv" command that ' "creates pack virtualenv.", ), cfg.ListOpt( "pip_opts", default=[], help='List of pip options to be passed to "pip install" command when installing pack ' "dependencies into pack virtual environment.", ), cfg.BoolOpt( "stream_output", default=True, help="True to store and stream action output (stdout and stderr) in real-time.", ), cfg.IntOpt( "stream_output_buffer_size", default=-1, help=( "Buffer size to use for real time action output streaming. 0 means unbuffered " "1 means line buffered, -1 means system default, which usually means fully " "buffered and any other positive value means use a buffer of (approximately) " "that size" ), ), ] do_register_opts( action_runner_opts, group="actionrunner", ignore_errors=ignore_errors ) dispatcher_pool_opts = [ cfg.IntOpt( "workflows_pool_size", default=40, help="Internal pool size for dispatcher used by workflow actions.", ), cfg.IntOpt( "actions_pool_size", default=60, help="Internal pool size for dispatcher used by regular actions.", ), ] do_register_opts( dispatcher_pool_opts, group="actionrunner", ignore_errors=ignore_errors ) ssh_runner_opts = [ cfg.StrOpt( "remote_dir", default="/tmp", help="Location of the script on the remote filesystem.", ), cfg.BoolOpt( "allow_partial_failure", default=False, help="How partial success of actions run on multiple nodes should be treated.", ), cfg.IntOpt( "max_parallel_actions", default=50, help="Max number of parallel remote SSH actions that should be run. 
" "Works only with Paramiko SSH runner.", ), cfg.BoolOpt( "use_ssh_config", default=False, help="Use the .ssh/config file. Useful to override ports etc.", ), cfg.StrOpt( "ssh_config_file_path", default="~/.ssh/config", help="Path to the ssh config file.", ), cfg.IntOpt( "ssh_connect_timeout", default=60, help="Max time in seconds to establish the SSH connection.", ), ] do_register_opts(ssh_runner_opts, group="ssh_runner", ignore_errors=ignore_errors) # Common options (used by action runner and sensor container) action_sensor_opts = [ cfg.BoolOpt( "enable", default=True, help="Whether to enable or disable the ability to post a trigger on action.", ), cfg.ListOpt( "emit_when", default=LIVEACTION_COMPLETED_STATES, help="List of execution statuses for which a trigger will be emitted. ", ), ] do_register_opts( action_sensor_opts, group="action_sensor", ignore_errors=ignore_errors ) # Common options for content pack_lib_opts = [ cfg.BoolOpt( "enable_common_libs", default=False, help="Enable/Disable support for pack common libs. " "Setting this config to ``True`` would allow you to " "place common library code for sensors and actions in lib/ folder " "in packs and use them in python sensors and actions. " "See https://docs.stackstorm.com/reference/" "sharing_code_sensors_actions.html " "for details.", ) ] do_register_opts(pack_lib_opts, group="packs", ignore_errors=ignore_errors) # Coordination options coord_opts = [ cfg.StrOpt("url", default=None, help="Endpoint for the coordination server."), cfg.IntOpt( "lock_timeout", default=60, help="TTL for the lock if backend suports it." ), cfg.BoolOpt( "service_registry", default=False, help="True to register StackStorm services in a service registry.", ), ] do_register_opts(coord_opts, "coordination", ignore_errors) # XXX: This is required for us to support deprecated config group results_tracker query_opts = [ cfg.IntOpt( "thread_pool_size", help="Number of threads to use to query external workflow systems.", ), cfg.FloatOpt( "query_interval", help="Time interval between subsequent queries for a context " "to external workflow system.", ), ] do_register_opts(query_opts, group="results_tracker", ignore_errors=ignore_errors) # Common stream options stream_opts = [ cfg.IntOpt( "heartbeat", default=25, help="Send empty message every N seconds to keep connection open", ) ] do_register_opts(stream_opts, group="stream", ignore_errors=ignore_errors) # Common CLI options cli_opts = [ cfg.BoolOpt( "debug", default=False, help="Enable debug mode. By default this will set all log levels to DEBUG.", ), cfg.BoolOpt( "profile", default=False, help="Enable profile mode. In the profile mode all the MongoDB queries and " "related profile data are logged.", ), cfg.BoolOpt( "use-debugger", default=True, help="Enables debugger. Note that using this option changes how the " "eventlet library is used to support async IO. This could result in " "failures that do not occur under normal operation.", ), ] do_register_cli_opts(cli_opts, ignore_errors=ignore_errors) # Metrics Options stream options metrics_opts = [ cfg.StrOpt( "driver", default="noop", help="Driver type for metrics collection." ), cfg.StrOpt( "host", default="127.0.0.1", help="Destination server to connect to if driver requires connection.", ), cfg.IntOpt( "port", default=8125, help="Destination port to connect to if driver requires connection.", ), cfg.StrOpt( "prefix", default=None, help="Optional prefix which is prepended to all the metric names. 
Comes handy when " "you want to submit metrics from various environment to the same metric " "backend instance.", ), cfg.FloatOpt( "sample_rate", default=1, help="Randomly sample and only send metrics for X% of metric operations to the " "backend. Default value of 1 means no sampling is done and all the metrics are " "sent to the backend. E.g. 0.1 would mean 10% of operations are sampled.", ), ] do_register_opts(metrics_opts, group="metrics", ignore_errors=ignore_errors) # Common timers engine options timer_logging_opts = [ cfg.StrOpt( "logging", default=None, help="Location of the logging configuration file. " "NOTE: Deprecated in favor of timersengine.logging", ), ] timers_engine_logging_opts = [ cfg.StrOpt( "logging", default="/etc/st2/logging.timersengine.conf", help="Location of the logging configuration file.", ) ] do_register_opts(timer_logging_opts, group="timer", ignore_errors=ignore_errors) do_register_opts( timers_engine_logging_opts, group="timersengine", ignore_errors=ignore_errors ) # NOTE: We default old style deprecated "timer" options to None so our code # works correclty and "timersengine" has precedence over "timers" # NOTE: "timer" section will be removed in v3.1 timer_opts = [ cfg.StrOpt( "local_timezone", default=None, help="Timezone pertaining to the location where st2 is run. " "NOTE: Deprecated in favor of timersengine.local_timezone", ), cfg.BoolOpt( "enable", default=None, help="Specify to enable timer service. " "NOTE: Deprecated in favor of timersengine.enable", ), ] timers_engine_opts = [ cfg.StrOpt( "local_timezone", default="America/Los_Angeles", help="Timezone pertaining to the location where st2 is run.", ), cfg.BoolOpt("enable", default=True, help="Specify to enable timer service."), ] do_register_opts(timer_opts, group="timer", ignore_errors=ignore_errors) do_register_opts( timers_engine_opts, group="timersengine", ignore_errors=ignore_errors ) # Workflow engine options workflow_engine_opts = [ cfg.IntOpt( "retry_stop_max_msec", default=60000, help="Max time to stop retrying." ), cfg.IntOpt( "retry_wait_fixed_msec", default=1000, help="Interval inbetween retries." ), cfg.FloatOpt( "retry_max_jitter_msec", default=1000, help="Max jitter interval to smooth out retries.", ), cfg.IntOpt( "gc_max_idle_sec", default=0, help="Max seconds to allow workflow execution be idled before it is identified as " "orphaned and cancelled by the garbage collector. A value of zero means the " "feature is disabled. This is disabled by default.", ), ] do_register_opts( workflow_engine_opts, group="workflow_engine", ignore_errors=ignore_errors )
def register_opts(ignore_errors=False): rbac_opts = [ cfg.BoolOpt("enable", default=False, help="Enable RBAC."), cfg.StrOpt("backend", default="noop", help="RBAC backend to use."), cfg.BoolOpt( "sync_remote_groups", default=False, help="True to synchronize remote groups returned by the auth backed for each " "StackStorm user with local StackStorm roles based on the group to role " "mapping definition files.", ), cfg.BoolOpt( "permission_isolation", default=False, help="Isolate resources by user. For now, these resources only include rules and " "executions. All resources can only be viewed or executed by the owning user " "except the admin and system_user who can view or run everything.", ), ] do_register_opts(rbac_opts, "rbac", ignore_errors) system_user_opts = [ cfg.StrOpt("user", default="stanley", help="Default system user."), cfg.StrOpt( "ssh_key_file", default="/home/stanley/.ssh/stanley_rsa", help="SSH private key for the system user.", ), ] do_register_opts(system_user_opts, "system_user", ignore_errors) schema_opts = [ cfg.IntOpt("version", default=4, help="Version of JSON schema to use."), cfg.StrOpt( "draft", default="http://json-schema.org/draft-04/schema#", help="URL to the JSON schema draft.", ), ] do_register_opts(schema_opts, "schema", ignore_errors) system_opts = [ cfg.BoolOpt("debug", default=False, help="Enable debug mode."), cfg.StrOpt( "base_path", default="/opt/stackstorm", help="Base path to all st2 artifacts.", ), cfg.BoolOpt( "validate_trigger_parameters", default=True, help="True to validate parameters for non-system trigger types when creating" "a rule. By default, only parameters for system triggers are validated.", ), cfg.BoolOpt( "validate_trigger_payload", default=True, help="True to validate payload for non-system trigger types when dispatching a trigger " "inside the sensor. By default, only payload for system triggers is validated.", ), cfg.BoolOpt( "validate_output_schema", default=False, help="True to validate action and runner output against schema.", ), ] do_register_opts(system_opts, "system", ignore_errors) system_packs_base_path = os.path.join(cfg.CONF.system.base_path, "packs") system_runners_base_path = os.path.join(cfg.CONF.system.base_path, "runners") content_opts = [ cfg.StrOpt( "pack_group", default="st2packs", help="User group that can write to packs directory.", ), cfg.StrOpt( "system_packs_base_path", default=system_packs_base_path, help="Path to the directory which contains system packs.", ), cfg.StrOpt( "system_runners_base_path", default=system_runners_base_path, help="Path to the directory which contains system runners. " "NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0", ), cfg.StrOpt( "packs_base_paths", default=None, help="Paths which will be searched for integration packs.", ), cfg.StrOpt( "runners_base_paths", default=None, help="Paths which will be searched for runners. " "NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0", ), cfg.ListOpt( "index_url", default=["https://index.stackstorm.org/v1/index.json"], help="A URL pointing to the pack index. StackStorm Exchange is used by " "default. Use a comma-separated list for multiple indexes if you " 'want to get other packs discovered with "st2 pack search".', ), ] do_register_opts(content_opts, "content", ignore_errors) webui_opts = [ cfg.StrOpt( "webui_base_url", default="https://%s" % socket.getfqdn(), help="Base https URL to access st2 Web UI. 
This is used to construct history URLs " "that are sent out when chatops is used to kick off executions.", ) ] do_register_opts(webui_opts, "webui", ignore_errors) db_opts = [ cfg.StrOpt("host", default="127.0.0.1", help="host of db server"), cfg.IntOpt("port", default=27017, help="port of db server"), cfg.StrOpt("db_name", default="st2", help="name of database"), cfg.StrOpt("username", help="username for db login"), cfg.StrOpt("password", help="password for db login"), cfg.IntOpt( "connection_timeout", default=3 * 1000, help="Connection and server selection timeout (in ms).", ), cfg.IntOpt( "connection_retry_max_delay_m", default=3, help="Connection retry total time (minutes).", ), cfg.IntOpt( "connection_retry_backoff_max_s", default=10, help="Connection retry backoff max (seconds).", ), cfg.IntOpt( "connection_retry_backoff_mul", default=1, help="Backoff multiplier (seconds).", ), cfg.BoolOpt( "ssl", default=False, help="Create the connection to mongodb using SSL" ), cfg.StrOpt( "ssl_keyfile", default=None, help="Private keyfile used to identify the local connection against MongoDB.", ), cfg.StrOpt( "ssl_certfile", default=None, help="Certificate file used to identify the localconnection", ), cfg.StrOpt( "ssl_cert_reqs", default=None, choices="none, optional, required", help="Specifies whether a certificate is required from the other side of the " "connection, and whether it will be validated if provided", ), cfg.StrOpt( "ssl_ca_certs", default=None, help="ca_certs file contains a set of concatenated CA certificates, which are " "used to validate certificates passed from MongoDB.", ), cfg.BoolOpt( "ssl_match_hostname", default=True, help="If True and `ssl_cert_reqs` is not None, enables hostname verification", ), cfg.StrOpt( "authentication_mechanism", default=None, help="Specifies database authentication mechanisms. " "By default, it use SCRAM-SHA-1 with MongoDB 3.0 and later, " "MONGODB-CR (MongoDB Challenge Response protocol) for older servers.", ), cfg.StrOpt( "compressors", default="", help="Comma delimited string of compression algorithms to use for transport level " "compression. Actual algorithm will then be decided based on the algorithms " "supported by the client and the server. For example: zstd. Defaults to no " "compression. Keep in mind that zstd is only supported with MongoDB 4.2 and later.", ), cfg.IntOpt( "zlib_compression_level", default="", help="Compression level when compressors is set to zlib. Valid values are -1 to 9. " "Defaults to 6.", ), ] do_register_opts(db_opts, "database", ignore_errors) messaging_opts = [ # It would be nice to be able to deprecate url and completely switch to using # url. However, this will be a breaking change and will have impact so allowing both. cfg.StrOpt( "url", default="amqp://guest:[email protected]:5672//", help="URL of the messaging server.", ), cfg.ListOpt( "cluster_urls", default=[], help="URL of all the nodes in a messaging service cluster.", ), cfg.IntOpt( "connection_retries", default=10, help="How many times should we retry connection before failing.", ), cfg.IntOpt( "connection_retry_wait", default=10000, help="How long should we wait between connection retries.", ), cfg.BoolOpt( "ssl", default=False, help="Use SSL / TLS to connect to the messaging server. 
Same as " 'appending "?ssl=true" at the end of the connection URL string.', ), cfg.StrOpt( "ssl_keyfile", default=None, help="Private keyfile used to identify the local connection against RabbitMQ.", ), cfg.StrOpt( "ssl_certfile", default=None, help="Certificate file used to identify the local connection (client).", ), cfg.StrOpt( "ssl_cert_reqs", default=None, choices="none, optional, required", help="Specifies whether a certificate is required from the other side of the " "connection, and whether it will be validated if provided.", ), cfg.StrOpt( "ssl_ca_certs", default=None, help="ca_certs file contains a set of concatenated CA certificates, which are " "used to validate certificates passed from RabbitMQ.", ), cfg.StrOpt( "login_method", default=None, help="Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.).", ), cfg.StrOpt( "compression", default=None, choices=["zstd", "lzma", "bz2", "gzip", None], help="Compression algorithm to use for compressing the payloads which are sent over " "the message bus. Defaults to no compression.", ), ] do_register_opts(messaging_opts, "messaging", ignore_errors) syslog_opts = [ cfg.StrOpt("host", default="127.0.0.1", help="Host for the syslog server."), cfg.IntOpt("port", default=514, help="Port for the syslog server."), cfg.StrOpt("facility", default="local7", help="Syslog facility level."), cfg.StrOpt( "protocol", default="udp", help="Transport protocol to use (udp / tcp)." ), ] do_register_opts(syslog_opts, "syslog", ignore_errors) log_opts = [ cfg.ListOpt("excludes", default="", help="Exclusion list of loggers to omit."), cfg.BoolOpt( "redirect_stderr", default=False, help="Controls if stderr should be redirected to the logs.", ), cfg.BoolOpt( "mask_secrets", default=True, help="True to mask secrets in the log files." ), cfg.ListOpt( "mask_secrets_blacklist", default=[], help="Blacklist of additional attribute names to mask in the log messages.", ), ] do_register_opts(log_opts, "log", ignore_errors) # Common API options api_opts = [ cfg.StrOpt("host", default="127.0.0.1", help="StackStorm API server host"), cfg.IntOpt("port", default=9101, help="StackStorm API server port"), cfg.ListOpt( "allow_origin", default=["http://127.0.0.1:3000"], help="List of origins allowed for api, auth and stream", ), cfg.BoolOpt( "mask_secrets", default=True, help="True to mask secrets in the API responses", ), cfg.BoolOpt( "auth_cookie_secure", default=True, help='True if secure flag should be set for "auth-token" cookie which is set on ' "successful authentication via st2web. You should only set this to False if you have " "a good reason to not run and access StackStorm behind https proxy.", ), cfg.StrOpt( "auth_cookie_same_site", default="lax", choices=["strict", "lax", "none", "None"], help="SameSite attribute value for the " "auth-token cookie we set on successful authentication from st2web. If you " "don't have a specific reason (e.g. supporting old browsers) we recommend you " "set this value to strict.", ), ] do_register_opts(api_opts, "api", ignore_errors) # Key Value store options keyvalue_opts = [ cfg.BoolOpt( "enable_encryption", default=True, help='Allow encryption of values in key value stored qualified as "secret".', ), cfg.StrOpt( "encryption_key_path", default="", help="Location of the symmetric encryption key for encrypting values in kvstore. 
" "This key should be in JSON and should've been generated using " "st2-generate-symmetric-crypto-key tool.", ), ] do_register_opts(keyvalue_opts, group="keyvalue") # Common auth options auth_opts = [ cfg.StrOpt( "api_url", default=None, help="Base URL to the API endpoint excluding the version", ), cfg.BoolOpt("enable", default=True, help="Enable authentication middleware."), cfg.IntOpt( "token_ttl", default=(24 * 60 * 60), help="Access token ttl in seconds." ), # This TTL is used for tokens which belong to StackStorm services cfg.IntOpt( "service_token_ttl", default=(24 * 60 * 60), help="Service token ttl in seconds.", ), ] do_register_opts(auth_opts, "auth", ignore_errors) # Runner options default_python_bin_path = sys.executable # If the virtualenv uses a symlinked python, then try using virtualenv from that venv # first before looking for virtualenv installed in python's system-site-packages. base_dir = os.path.dirname(default_python_bin_path) default_virtualenv_bin_path = os.path.join(base_dir, "virtualenv") if not os.path.exists(default_virtualenv_bin_path): base_dir = os.path.dirname(os.path.realpath(default_python_bin_path)) default_virtualenv_bin_path = os.path.join(base_dir, "virtualenv") action_runner_opts = [ # Common runner options cfg.StrOpt( "logging", default="/etc/st2/logging.actionrunner.conf", help="location of the logging.conf file", ), # Python runner options cfg.StrOpt( "python_binary", default=default_python_bin_path, help="Python binary which will be used by Python actions.", ), cfg.StrOpt( "virtualenv_binary", default=default_virtualenv_bin_path, help="Virtualenv binary which should be used to create pack virtualenvs.", ), cfg.StrOpt( "python_runner_log_level", default=PYTHON_RUNNER_DEFAULT_LOG_LEVEL, help="Default log level to use for Python runner actions. Can be overriden on " 'invocation basis using "log_level" runner parameter.', ), cfg.ListOpt( "virtualenv_opts", default=["--system-site-packages"], help='List of virtualenv options to be passsed to "virtualenv" command that ' "creates pack virtualenv.", ), cfg.ListOpt( "pip_opts", default=[], help='List of pip options to be passed to "pip install" command when installing pack ' "dependencies into pack virtual environment.", ), cfg.BoolOpt( "stream_output", default=True, help="True to store and stream action output (stdout and stderr) in real-time.", ), cfg.IntOpt( "stream_output_buffer_size", default=-1, help=( "Buffer size to use for real time action output streaming. 0 means unbuffered " "1 means line buffered, -1 means system default, which usually means fully " "buffered and any other positive value means use a buffer of (approximately) " "that size" ), ), ] do_register_opts( action_runner_opts, group="actionrunner", ignore_errors=ignore_errors ) dispatcher_pool_opts = [ cfg.IntOpt( "workflows_pool_size", default=40, help="Internal pool size for dispatcher used by workflow actions.", ), cfg.IntOpt( "actions_pool_size", default=60, help="Internal pool size for dispatcher used by regular actions.", ), ] do_register_opts( dispatcher_pool_opts, group="actionrunner", ignore_errors=ignore_errors ) ssh_runner_opts = [ cfg.StrOpt( "remote_dir", default="/tmp", help="Location of the script on the remote filesystem.", ), cfg.BoolOpt( "allow_partial_failure", default=False, help="How partial success of actions run on multiple nodes should be treated.", ), cfg.IntOpt( "max_parallel_actions", default=50, help="Max number of parallel remote SSH actions that should be run. 
" "Works only with Paramiko SSH runner.", ), cfg.BoolOpt( "use_ssh_config", default=False, help="Use the .ssh/config file. Useful to override ports etc.", ), cfg.StrOpt( "ssh_config_file_path", default="~/.ssh/config", help="Path to the ssh config file.", ), cfg.IntOpt( "ssh_connect_timeout", default=60, help="Max time in seconds to establish the SSH connection.", ), ] do_register_opts(ssh_runner_opts, group="ssh_runner", ignore_errors=ignore_errors) # Common options (used by action runner and sensor container) action_sensor_opts = [ cfg.BoolOpt( "enable", default=True, help="Whether to enable or disable the ability to post a trigger on action.", ), cfg.ListOpt( "emit_when", default=LIVEACTION_COMPLETED_STATES, help="List of execution statuses for which a trigger will be emitted. ", ), ] do_register_opts( action_sensor_opts, group="action_sensor", ignore_errors=ignore_errors ) # Common options for content pack_lib_opts = [ cfg.BoolOpt( "enable_common_libs", default=False, help="Enable/Disable support for pack common libs. " "Setting this config to ``True`` would allow you to " "place common library code for sensors and actions in lib/ folder " "in packs and use them in python sensors and actions. " "See https://docs.stackstorm.com/reference/" "sharing_code_sensors_actions.html " "for details.", ) ] do_register_opts(pack_lib_opts, group="packs", ignore_errors=ignore_errors) # Coordination options coord_opts = [ cfg.StrOpt("url", default=None, help="Endpoint for the coordination server."), cfg.IntOpt( "lock_timeout", default=60, help="TTL for the lock if backend suports it." ), cfg.BoolOpt( "service_registry", default=False, help="True to register StackStorm services in a service registry.", ), ] do_register_opts(coord_opts, "coordination", ignore_errors) # XXX: This is required for us to support deprecated config group results_tracker query_opts = [ cfg.IntOpt( "thread_pool_size", help="Number of threads to use to query external workflow systems.", ), cfg.FloatOpt( "query_interval", help="Time interval between subsequent queries for a context " "to external workflow system.", ), ] do_register_opts(query_opts, group="results_tracker", ignore_errors=ignore_errors) # Common stream options stream_opts = [ cfg.IntOpt( "heartbeat", default=25, help="Send empty message every N seconds to keep connection open", ) ] do_register_opts(stream_opts, group="stream", ignore_errors=ignore_errors) # Common CLI options cli_opts = [ cfg.BoolOpt( "debug", default=False, help="Enable debug mode. By default this will set all log levels to DEBUG.", ), cfg.BoolOpt( "profile", default=False, help="Enable profile mode. In the profile mode all the MongoDB queries and " "related profile data are logged.", ), cfg.BoolOpt( "use-debugger", default=True, help="Enables debugger. Note that using this option changes how the " "eventlet library is used to support async IO. This could result in " "failures that do not occur under normal operation.", ), ] do_register_cli_opts(cli_opts, ignore_errors=ignore_errors) # Metrics Options stream options metrics_opts = [ cfg.StrOpt( "driver", default="noop", help="Driver type for metrics collection." ), cfg.StrOpt( "host", default="127.0.0.1", help="Destination server to connect to if driver requires connection.", ), cfg.IntOpt( "port", default=8125, help="Destination port to connect to if driver requires connection.", ), cfg.StrOpt( "prefix", default=None, help="Optional prefix which is prepended to all the metric names. 
Comes handy when " "you want to submit metrics from various environment to the same metric " "backend instance.", ), cfg.FloatOpt( "sample_rate", default=1, help="Randomly sample and only send metrics for X% of metric operations to the " "backend. Default value of 1 means no sampling is done and all the metrics are " "sent to the backend. E.g. 0.1 would mean 10% of operations are sampled.", ), ] do_register_opts(metrics_opts, group="metrics", ignore_errors=ignore_errors) # Common timers engine options timer_logging_opts = [ cfg.StrOpt( "logging", default=None, help="Location of the logging configuration file. " "NOTE: Deprecated in favor of timersengine.logging", ), ] timers_engine_logging_opts = [ cfg.StrOpt( "logging", default="/etc/st2/logging.timersengine.conf", help="Location of the logging configuration file.", ) ] do_register_opts(timer_logging_opts, group="timer", ignore_errors=ignore_errors) do_register_opts( timers_engine_logging_opts, group="timersengine", ignore_errors=ignore_errors ) # NOTE: We default old style deprecated "timer" options to None so our code # works correclty and "timersengine" has precedence over "timers" # NOTE: "timer" section will be removed in v3.1 timer_opts = [ cfg.StrOpt( "local_timezone", default=None, help="Timezone pertaining to the location where st2 is run. " "NOTE: Deprecated in favor of timersengine.local_timezone", ), cfg.BoolOpt( "enable", default=None, help="Specify to enable timer service. " "NOTE: Deprecated in favor of timersengine.enable", ), ] timers_engine_opts = [ cfg.StrOpt( "local_timezone", default="America/Los_Angeles", help="Timezone pertaining to the location where st2 is run.", ), cfg.BoolOpt("enable", default=True, help="Specify to enable timer service."), ] do_register_opts(timer_opts, group="timer", ignore_errors=ignore_errors) do_register_opts( timers_engine_opts, group="timersengine", ignore_errors=ignore_errors ) # Workflow engine options workflow_engine_opts = [ cfg.IntOpt( "retry_stop_max_msec", default=60000, help="Max time to stop retrying." ), cfg.IntOpt( "retry_wait_fixed_msec", default=1000, help="Interval inbetween retries." ), cfg.FloatOpt( "retry_max_jitter_msec", default=1000, help="Max jitter interval to smooth out retries.", ), cfg.IntOpt( "gc_max_idle_sec", default=0, help="Max seconds to allow workflow execution be idled before it is identified as " "orphaned and cancelled by the garbage collector. A value of zero means the " "feature is disabled. This is disabled by default.", ), ] do_register_opts( workflow_engine_opts, group="workflow_engine", ignore_errors=ignore_errors )
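All of the option groups above are registered through a `do_register_opts` helper that is defined elsewhere in the codebase and is not shown here. As a rough, hedged sketch only, this is what such a helper typically looks like with oslo.config (the library these `cfg.StrOpt` / `cfg.IntOpt` options come from); the "example" group and the helper body are assumptions, not the real st2 implementation.

# Hypothetical sketch only; the real do_register_opts in st2 may differ.
from oslo_config import cfg

CONF = cfg.CONF

def do_register_opts(opts, group=None, ignore_errors=False):
    # Register a list of oslo.config options under a group, optionally
    # swallowing errors such as duplicate registration.
    try:
        CONF.register_opts(opts, group=group)
    except Exception:
        if not ignore_errors:
            raise

# Illustrative use with a made-up "example" group.
example_opts = [cfg.StrOpt("host", default="127.0.0.1", help="Example host.")]
do_register_opts(example_opts, group="example")
CONF([])                  # parse an empty argument list so option values can be read
print(CONF.example.host)  # -> 127.0.0.1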
33,687
def init( name=None, http_host=DEFAULT_HTTP_HOST, http_port=DEFAULT_HTTP_PORT, metric_exporter=InMemoryExporter, ): """Initialize or connect to a serve cluster. If serve cluster is already initialized, this function will just return. If `ray.init` has not been called in this process, it will be called with no arguments. To specify kwargs to `ray.init`, it should be called separately before calling `serve.init`. Args: name (str): A unique name for this serve instance. This allows multiple serve instances to run on the same ray cluster. Must be specified in all subsequent serve.init() calls. http_host (str): Host for HTTP servers. Default to "0.0.0.0". Serve starts one HTTP server per node. http_port (int, List[int]): Port for HTTP server. Default to 8000. If a list of integers are passed in, multiple instance of the HTTP servers will be started and bind to each port. On linux machine, the ports can be repeated. metric_exporter(ExporterInterface): The class aggregates metrics from all RayServe actors and optionally export them to external services. Ray Serve has two options built in: InMemoryExporter and PrometheusExporter """ if name is not None and not isinstance(name, str): raise TypeError("name must be a string.") # Initialize ray if needed. if not ray.is_initialized(): ray.init() # Try to get serve controller if it exists global controller controller_name = format_actor_name(SERVE_CONTROLLER_NAME, name) try: controller = ray.get_actor(controller_name) return except ValueError: pass controller = ServeController.options( name=controller_name, max_restarts=-1, max_task_retries=-1, ).remote( name, http_host, http_port, metric_exporter, ) futures = [] for node_id in ray.state.node_ids(): future = block_until_http_ready.options( num_cpus=0, resources={ node_id: 0.01 }).remote( "http://{}:{}/-/routes".format(http_host, http_port), timeout=HTTP_PROXY_TIMEOUT) futures.append(future) ray.get(futures)
def init( name=None, http_host=DEFAULT_HTTP_HOST, http_port=DEFAULT_HTTP_PORT, metric_exporter=InMemoryExporter, ): """Initialize or connect to a serve cluster. If the serve cluster is already initialized, this function will just return. If `ray.init` has not been called in this process, it will be called with no arguments. To specify kwargs to `ray.init`, it should be called separately before calling `serve.init`. Args: name (str): A unique name for this serve instance. This allows multiple serve instances to run on the same ray cluster. Must be specified in all subsequent serve.init() calls. http_host (str): Host for HTTP servers. Defaults to "0.0.0.0". Serve starts one HTTP server per node in the Ray cluster. http_port (int, List[int]): Port for HTTP server. Defaults to 8000. If a list of integers is passed in, multiple HTTP servers will be started, one bound to each port. On Linux machines, the ports can be repeated. metric_exporter (ExporterInterface): The class that aggregates metrics from all RayServe actors and optionally exports them to external services. Ray Serve has two options built in: InMemoryExporter and PrometheusExporter. """ if name is not None and not isinstance(name, str): raise TypeError("name must be a string.") # Initialize ray if needed. if not ray.is_initialized(): ray.init() # Try to get serve controller if it exists global controller controller_name = format_actor_name(SERVE_CONTROLLER_NAME, name) try: controller = ray.get_actor(controller_name) return except ValueError: pass controller = ServeController.options( name=controller_name, max_restarts=-1, max_task_retries=-1, ).remote( name, http_host, http_port, metric_exporter, ) futures = [] for node_id in ray.state.node_ids(): future = block_until_http_ready.options( num_cpus=0, resources={ node_id: 0.01 }).remote( "http://{}:{}/-/routes".format(http_host, http_port), timeout=HTTP_PROXY_TIMEOUT) futures.append(future) ray.get(futures)
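A hedged usage sketch of the `init()` helper above, assuming it is exposed as `serve.init` in this older Ray Serve API; the keyword arguments follow its docstring and nothing beyond it.

import ray
from ray import serve    # assumption: the module above is ray.serve

ray.init()               # optional: init() would call this itself if omitted
serve.init(
    name="demo",         # unique name for this serve instance
    http_host="0.0.0.0",
    http_port=8000,
)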
17,480
def get_chunksizes( variables: Iterable[Variable], ) -> Mapping[Hashable, Tuple[int, ...]]: chunks: Dict[Hashable, Tuple[int, ...]] = {} for v in variables: if hasattr(v.data, "chunks"): for dim, c in v.chunksizes.items(): if dim in chunks and c != chunks[dim]: raise ValueError( f"Object has inconsistent chunks along dimension {dim}. " "This can be fixed by calling unify_chunks()." ) chunks[dim] = c return Frozen(chunks)
def get_chunksizes( variables: Iterable[Variable], ) -> Mapping[Hashable, Tuple[int, ...]]: chunks: Dict[Any, Tuple[int, ...]] = {} for v in variables: if hasattr(v.data, "chunks"): for dim, c in v.chunksizes.items(): if dim in chunks and c != chunks[dim]: raise ValueError( f"Object has inconsistent chunks along dimension {dim}. " "This can be fixed by calling unify_chunks()." ) chunks[dim] = c return Frozen(chunks)
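The check in `get_chunksizes` boils down to merging per-variable chunk maps and rejecting any dimension whose chunking disagrees across variables. A standalone sketch of that merge logic with plain dicts (no xarray objects involved):

from typing import Dict, Tuple

def merge_chunks(per_variable: Dict[str, Dict[str, Tuple[int, ...]]]) -> Dict[str, Tuple[int, ...]]:
    # Merge chunk maps from several variables, raising on conflicts.
    merged: Dict[str, Tuple[int, ...]] = {}
    for name, chunksizes in per_variable.items():
        for dim, c in chunksizes.items():
            if dim in merged and c != merged[dim]:
                raise ValueError(f"Object has inconsistent chunks along dimension {dim}.")
            merged[dim] = c
    return merged

print(merge_chunks({"a": {"x": (2, 2)}, "b": {"x": (2, 2), "y": (3,)}}))
# {'x': (2, 2), 'y': (3,)}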
7,367
def _masked_phase_cross_correlation(reference_image, moving_image, reference_mask, moving_mask=None, overlap_ratio=0.3): """Masked image translation registration by masked normalized cross-correlation. Parameters ---------- reference_image : ndarray Reference image. moving_image : ndarray Image to register. Must be same dimensionality as ``reference_image``, but not necessarily the same size. reference_mask : ndarray Boolean mask for ``reference_image``. The mask should evaluate to ``True`` (or 1) on valid pixels. ``reference_mask`` should have the same shape as ``reference_image``. moving_mask : ndarray or None, optional Boolean mask for ``moving_image``. The mask should evaluate to ``True`` (or 1) on valid pixels. ``moving_mask`` should have the same shape as ``moving_image``. If ``None``, ``reference_mask`` will be used. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- shifts : ndarray Shift vector (in pixels) required to register ``moving_image`` with ``reference_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X) References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ if moving_mask is None: if reference_image.shape != moving_image.shape: raise ValueError( "Input images have different shapes, moving_mask must " "be explicitely set.") moving_mask = reference_mask.astype(bool) # We need masks to be of the same size as their respective images for (im, mask) in [(reference_image, reference_mask), (moving_image, moving_mask)]: if im.shape != mask.shape: raise ValueError( "Image sizes must match their respective mask sizes.") xcorr = cross_correlate_masked(moving_image, reference_image, moving_mask, reference_mask, axes=tuple(range(moving_image.ndim)), mode='full', overlap_ratio=overlap_ratio) # Generalize to the average of multiple equal maxima maxima = np.stack(np.nonzero(xcorr == xcorr.max()), axis=1) center = np.mean(maxima, axis=0) shifts = center - np.array(reference_image.shape) + 1 # The mismatch in size will impact the center location of the # cross-correlation size_mismatch = (np.array(moving_image.shape) - np.array(reference_image.shape)) return -shifts + (size_mismatch / 2)
def _masked_phase_cross_correlation(reference_image, moving_image, reference_mask, moving_mask=None, overlap_ratio=0.3): """Masked image translation registration by masked normalized cross-correlation. Parameters ---------- reference_image : ndarray Reference image. moving_image : ndarray Image to register. Must be same dimensionality as ``reference_image``, but not necessarily the same size. reference_mask : ndarray Boolean mask for ``reference_image``. The mask should evaluate to ``True`` (or 1) on valid pixels. ``reference_mask`` should have the same shape as ``reference_image``. moving_mask : ndarray or None, optional Boolean mask for ``moving_image``. The mask should evaluate to ``True`` (or 1) on valid pixels. ``moving_mask`` should have the same shape as ``moving_image``. If ``None``, ``reference_mask`` will be used. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- shifts : ndarray Shift vector (in pixels) required to register ``moving_image`` with ``reference_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X) References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ if moving_mask is None: if reference_image.shape != moving_image.shape: raise ValueError( "Input images have different shapes, moving_mask must " "be explicitly set.") moving_mask = reference_mask.astype(bool) # We need masks to be of the same size as their respective images for (im, mask) in [(reference_image, reference_mask), (moving_image, moving_mask)]: if im.shape != mask.shape: raise ValueError( "Image sizes must match their respective mask sizes.") xcorr = cross_correlate_masked(moving_image, reference_image, moving_mask, reference_mask, axes=tuple(range(moving_image.ndim)), mode='full', overlap_ratio=overlap_ratio) # Generalize to the average of multiple equal maxima maxima = np.stack(np.nonzero(xcorr == xcorr.max()), axis=1) center = np.mean(maxima, axis=0) shifts = center - np.array(reference_image.shape) + 1 # The mismatch in size will impact the center location of the # cross-correlation size_mismatch = (np.array(moving_image.shape) - np.array(reference_image.shape)) return -shifts + (size_mismatch / 2)
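The "average of multiple equal maxima" step above can be illustrated on a toy correlation surface; this is only a sketch of that one step, not of the masked cross-correlation itself.

import numpy as np

# When several positions share the maximum correlation, the reported peak
# location is the mean of those positions.
xcorr = np.zeros((5, 5))
xcorr[2, 1] = xcorr[2, 3] = 1.0           # two equally good peaks
maxima = np.stack(np.nonzero(xcorr == xcorr.max()), axis=1)
center = np.mean(maxima, axis=0)
print(center)                             # [2. 2.] -> averaged peak location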
20,721
def findNodePlacement(nodes_to_arrange: List["SceneNode"], build_volume: "BuildVolume", fixed_nodes: Optional[List["SceneNode"]] = None, factor = 10000) -> Tuple[bool, List[Item]]: """ Find placement for a set of scene nodes, but don't actually move them just yet. :param nodes_to_arrange: The list of nodes that need to be moved. :param build_volume: The build volume that we want to place the nodes in. It gets size & disallowed areas from this. :param fixed_nodes: List of nods that should not be moved, but should be used when deciding where the others nodes are placed. :param factor: The library that we use is int based. This factor defines how accuracte we want it to be. :return: """ machine_width = build_volume.getWidth() machine_depth = build_volume.getDepth() build_plate_bounding_box = Box(machine_width * factor, machine_depth * factor) if fixed_nodes is None: fixed_nodes = [] # Add all the items we want to arrange node_items = [] for node in nodes_to_arrange: hull_polygon = node.callDecoration("getConvexHull") converted_points = [] for point in hull_polygon.getPoints(): converted_points.append(Point(point[0] * factor, point[1] * factor)) item = Item(converted_points) node_items.append(item) # Use a tiny margin for the build_plate_polygon (the nesting doesn't like overlapping disallowed areas) half_machine_width = 0.5 * machine_width - 1 half_machine_depth = 0.5 * machine_depth - 1 build_plate_polygon = Polygon(numpy.array([ [half_machine_width, -half_machine_depth], [-half_machine_width, -half_machine_depth], [-half_machine_width, half_machine_depth], [half_machine_width, half_machine_depth] ], numpy.float32)) disallowed_areas = build_volume.getDisallowedAreas() num_disallowed_areas_added = 0 for area in disallowed_areas: converted_points = [] # Clip the disallowed areas so that they don't overlap the bounding box (The arranger chokes otherwise) clipped_area = area.intersectionConvexHulls(build_plate_polygon) for point in clipped_area.getPoints(): converted_points.append(Point(point[0] * factor, point[1] * factor)) disallowed_area = Item(converted_points) disallowed_area.markAsFixedInBin(0) node_items.append(disallowed_area) num_disallowed_areas_added += 1 for node in fixed_nodes: converted_points = [] hull_polygon = node.callDecoration("getConvexHull") for point in hull_polygon.getPoints(): converted_points.append(Point(point[0] * factor, point[1] * factor)) item = Item(converted_points) node_items.append(item) item.markAsFixedInBin(0) node_items.append(item) num_disallowed_areas_added += 1 config = NfpConfig() config.accuracy = 1.0 num_bins = nest(node_items, build_plate_bounding_box, 10000, config) # Strip the disallowed areas from the results again if num_disallowed_areas_added != 0: node_items = node_items[:-num_disallowed_areas_added] found_solution_for_all = num_bins == 1 return found_solution_for_all, node_items
def findNodePlacement(nodes_to_arrange: List["SceneNode"], build_volume: "BuildVolume", fixed_nodes: Optional[List["SceneNode"]] = None, factor = 10000) -> Tuple[bool, List[Item]]: """ Find placement for a set of scene nodes, but don't actually move them just yet. :param nodes_to_arrange: The list of nodes that need to be moved. :param build_volume: The build volume that we want to place the nodes in. It gets size & disallowed areas from this. :param fixed_nodes: List of nods that should not be moved, but should be used when deciding where the others nodes are placed. :param factor: The library that we use is int based. This factor defines how accurate we want it to be. :return: """ machine_width = build_volume.getWidth() machine_depth = build_volume.getDepth() build_plate_bounding_box = Box(machine_width * factor, machine_depth * factor) if fixed_nodes is None: fixed_nodes = [] # Add all the items we want to arrange node_items = [] for node in nodes_to_arrange: hull_polygon = node.callDecoration("getConvexHull") converted_points = [] for point in hull_polygon.getPoints(): converted_points.append(Point(point[0] * factor, point[1] * factor)) item = Item(converted_points) node_items.append(item) # Use a tiny margin for the build_plate_polygon (the nesting doesn't like overlapping disallowed areas) half_machine_width = 0.5 * machine_width - 1 half_machine_depth = 0.5 * machine_depth - 1 build_plate_polygon = Polygon(numpy.array([ [half_machine_width, -half_machine_depth], [-half_machine_width, -half_machine_depth], [-half_machine_width, half_machine_depth], [half_machine_width, half_machine_depth] ], numpy.float32)) disallowed_areas = build_volume.getDisallowedAreas() num_disallowed_areas_added = 0 for area in disallowed_areas: converted_points = [] # Clip the disallowed areas so that they don't overlap the bounding box (The arranger chokes otherwise) clipped_area = area.intersectionConvexHulls(build_plate_polygon) for point in clipped_area.getPoints(): converted_points.append(Point(point[0] * factor, point[1] * factor)) disallowed_area = Item(converted_points) disallowed_area.markAsFixedInBin(0) node_items.append(disallowed_area) num_disallowed_areas_added += 1 for node in fixed_nodes: converted_points = [] hull_polygon = node.callDecoration("getConvexHull") for point in hull_polygon.getPoints(): converted_points.append(Point(point[0] * factor, point[1] * factor)) item = Item(converted_points) node_items.append(item) item.markAsFixedInBin(0) node_items.append(item) num_disallowed_areas_added += 1 config = NfpConfig() config.accuracy = 1.0 num_bins = nest(node_items, build_plate_bounding_box, 10000, config) # Strip the disallowed areas from the results again if num_disallowed_areas_added != 0: node_items = node_items[:-num_disallowed_areas_added] found_solution_for_all = num_bins == 1 return found_solution_for_all, node_items
24,110
def config_proxy_skip(proxies, uri, skip_proxy=False): """ Returns an amended copy of the proxies dictionary - used by `requests`, it will disable the proxy if the uri provided is to be reached directly. :param proxies dict with existing proxies: 'https', 'http', 'no' as pontential keys :param uri uri to determine if proxy is necessary or not. :param skip_proxy if True, the proxy dictionary returned will disable all proxies """ parsed_uri = urlparse(uri) # disable proxy if necessary if skip_proxy: proxies['http'] = '' proxies['https'] = '' elif proxies.get('no'): urls = [] if isinstance(proxies['no'], string_types): urls = proxies['no'].replace(';', ',').split(",") elif isinstance(proxies['no'], list): urls = proxies['no'] for url in urls: if url in parsed_uri.netloc: if 'http' in proxies: proxies.pop('http') if 'https' in proxies: proxies.pop('https') return proxies
def config_proxy_skip(proxies, uri, skip_proxy=False): """ Returns an amended copy of the proxies dictionary, as used by `requests`; the proxy is disabled if the provided uri is to be reached directly. :param proxies A dict with existing proxies: `https`, `http`, and `no` are potential keys. :param uri The uri used to decide whether a proxy is necessary. :param skip_proxy If True, the returned proxy dictionary disables all proxies. """ parsed_uri = urlparse(uri) # disable proxy if necessary if skip_proxy: proxies['http'] = '' proxies['https'] = '' elif proxies.get('no'): urls = [] if isinstance(proxies['no'], string_types): urls = proxies['no'].replace(';', ',').split(",") elif isinstance(proxies['no'], list): urls = proxies['no'] for url in urls: if url in parsed_uri.netloc: if 'http' in proxies: proxies.pop('http') if 'https' in proxies: proxies.pop('https') return proxies
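A hedged usage sketch of `config_proxy_skip` above, assuming the function and its helpers (`urlparse`, `string_types`) are importable from the same utility module; the host names are made up.

proxies = {"http": "http://proxy:3128", "https": "http://proxy:3128", "no": "internal.example.com"}

# Host matches the 'no' list -> http/https entries are removed.
print(config_proxy_skip(dict(proxies), "https://internal.example.com/status"))
# {'no': 'internal.example.com'}

# skip_proxy=True blanks the proxies regardless of the 'no' list.
print(config_proxy_skip(dict(proxies), "https://api.example.org", skip_proxy=True))
# {'http': '', 'https': '', 'no': 'internal.example.com'}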
20,226
def ecfr_to_regdown(part_number, file_path=None): """ Extract a regulation Part from eCFR XML, and create regdown content. The default XML source is the latest regulation posting at www.gpo.gov, which gets updated every few days. If `file_path` is specified, a local XML file is parsed instead. DIV1 is a title (as in Title 12) DIV3 is a chapter (not used here) DIV5 is a part DIV6 is a subpart DIV8 is a section DIV9 is an appendix DIV9 element whose HEAD starts with 'Supplement I' is an interpretation To avoid mischief, we make sure the part number is on a allowlist. """ PAYLOAD.reset() if part_number not in PART_ALLOWLIST: raise ValueError('Provided Part number is not a CFPB regulation.') starter = datetime.datetime.now() if file_path: try: with open(file_path, 'r') as f: markup = f.read() except IOError: logger.info("Could not open local file {}".format(file_path)) return else: ecfr_request = requests.get(LATEST_ECFR) if not ecfr_request.ok: logger.info( "ECFR request failed with code {} and reason {}".format( ecfr_request.status_code, ecfr_request.reason)) return ecfr_request.encoding = 'utf-8' markup = ecfr_request.text soup = bS(markup, "lxml-xml") parts = soup.find_all('DIV5') part_soup = [div for div in parts if div['N'] == part_number][0] PAYLOAD.get_effective_date(part_number) PAYLOAD.parse_part(part_soup, part_number) part = PAYLOAD.part PAYLOAD.parse_version(part_soup, part) # parse_subparts will create and associate sections and appendices parse_subparts(part_soup, part) msg = ( "Draft version of Part {} created.\n" "Parsing took {}".format( part_number, (datetime.datetime.now() - starter)) ) return msg
def ecfr_to_regdown(part_number, file_path=None): """ Extract a regulation Part from eCFR XML, and create regdown content. The default XML source is the latest regulation posting at www.gpo.gov, which gets updated every few days. If `file_path` is specified, a local XML file is parsed instead. DIV1 is a title (as in Title 12) DIV3 is a chapter (not used here) DIV5 is a part DIV6 is a subpart DIV8 is a section DIV9 is an appendix DIV9 element whose HEAD starts with 'Supplement I' is an interpretation To avoid mischief, we make sure the part number is on an allowlist. """ PAYLOAD.reset() if part_number not in PART_ALLOWLIST: raise ValueError('Provided Part number is not a CFPB regulation.') starter = datetime.datetime.now() if file_path: try: with open(file_path, 'r') as f: markup = f.read() except IOError: logger.info("Could not open local file {}".format(file_path)) return else: ecfr_request = requests.get(LATEST_ECFR) if not ecfr_request.ok: logger.info( "ECFR request failed with code {} and reason {}".format( ecfr_request.status_code, ecfr_request.reason)) return ecfr_request.encoding = 'utf-8' markup = ecfr_request.text soup = bS(markup, "lxml-xml") parts = soup.find_all('DIV5') part_soup = [div for div in parts if div['N'] == part_number][0] PAYLOAD.get_effective_date(part_number) PAYLOAD.parse_part(part_soup, part_number) part = PAYLOAD.part PAYLOAD.parse_version(part_soup, part) # parse_subparts will create and associate sections and appendices parse_subparts(part_soup, part) msg = ( "Draft version of Part {} created.\n" "Parsing took {}".format( part_number, (datetime.datetime.now() - starter)) ) return msg
5,837
def directionalmean(samples, axis=0, nan_policy='propagate'): """ Computes the directional mean of a sample of vectors. Serves as equivalent of the sample mean for directional data whose magnitude is irrelevant, e. g. unit vectors. Parameters ---------- samples : array_like Input array. Must at least be two-dimensional. axis : int, optional Axis along which directional means are computed. Default is 0. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- directionalmean : ndarray Directional mean. Notes ----- This uses a definition of directional mean from [1]_. Essentially, the calculation is as follows. .. code-block:: python mean=samples.mean() directionalmean = mean/np.linalg.norm(mean) References ---------- .. [1] Mardia, Jupp. (2000). *Directional Statistics* (p. 163). Wiley. Examples -------- >>> data = np.array([[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0.]]) >>> directionalmean(data) array([1., 0., 0.]) The `regular`sample mean in contrast does not lie on the unit sphere. >>> data.mean(axis=0) array([0.8660254, 0., 0.]) """ samples = np.asarray(samples) if samples.ndim < 2: raise ValueError("samples must at least be two-dimensional. " "Instead samples has shape: %r." % samples.shape) contains_nan, nan_policy = _contains_nan(samples, nan_policy) if contains_nan and nan_policy == 'omit': mean = np.nanmean(samples, axis = axis) else: mean = np.mean(samples, axis = axis) directional_mean = mean/np.linalg.norm(mean) return directional_mean
def directionalmean(samples, axis=0, nan_policy='propagate'): """ Computes the directional mean of a sample of vectors. Serves as the equivalent of the sample mean for directional data whose magnitude is irrelevant, e.g. unit vectors. Parameters ---------- samples : array_like Input array. Must be at least two-dimensional. axis : int, optional Axis along which directional means are computed. Default is 0. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- directionalmean : ndarray Directional mean. Notes ----- This uses a definition of directional mean from [1]_. Essentially, the calculation is as follows. .. code-block:: python mean=samples.mean() directionalmean = mean/np.linalg.norm(mean) References ---------- .. [1] Mardia, Jupp. (2000). *Directional Statistics* (p. 163). Wiley. Examples -------- >>> data = np.array([[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0.]]) >>> directionalmean(data) array([1., 0., 0.]) The regular sample mean, in contrast, does not lie on the unit sphere. >>> data.mean(axis=0) array([0.8660254, 0., 0.]) """ samples = np.asarray(samples) if samples.ndim < 2: raise ValueError("samples must be at least two-dimensional. " "Instead samples has shape: %r." % samples.shape) contains_nan, nan_policy = _contains_nan(samples, nan_policy) if contains_nan and nan_policy == 'omit': mean = np.nanmean(samples, axis = axis) else: mean = np.mean(samples, axis = axis) directional_mean = mean/np.linalg.norm(mean) return directional_mean
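The computation described in the Notes section is small enough to show standalone; a minimal numpy sketch reproducing the docstring example: take the ordinary mean of the vectors, then rescale it back onto the unit sphere.

import numpy as np

data = np.array([[0.8660254, 0.5, 0.0],
                 [0.8660254, -0.5, 0.0]])
mean = data.mean(axis=0)                 # [0.8660254, 0., 0.]
print(mean / np.linalg.norm(mean))       # [1., 0., 0.]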
48,565
def vtk_points(points, deep=True, force_float=False): """Convert numpy array or array-like to a ``vtkPoints`` object. Parameters ---------- points : numpy.ndarray or sequence Points to convert. Should be 1 or 2 dimensional. Accepts a single point or several points. deep : bool, optional Perform a deep copy of the array. Only applicable if ``points`` is a :class:`numpy.ndarray`. force_float : bool, optional Casts the datatype to float32 if points datatype are non-float. Set this to ``False`` to allow non-float types, though this may lead to errors when transforming datasets. Returns ------- vtk.vtkPoints The vtkPoints object. Examples -------- >>> import pyvista >>> import numpy as np >>> points = np.random.random((10, 3)) >>> vpoints = pyvista.vtk_points(points) >>> vpoints # doctest:+SKIP (vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40 """ points = np.asanyarray(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') if force_float: if not np.issubdtype(points.dtype, np.floating): warnings.warn( 'Points is not a float type. This can cause issues when ' 'transforming or applying filters. Casting to ' '``np.float32``. Disable this by passing ' '``float_float=False``' ) points = points.astype(np.float32) # check dimensionality if points.ndim == 1: points = points.reshape(-1, 3) elif points.ndim > 2: raise ValueError('Dimension of ``points`` should be 1 or 2, not ' f'{points.ndim}') # verify shape if points.shape[1] != 3: raise ValueError('Points array must contain three values per point. ' f'Shape is {points.shape} and should be (X, 3)') # points must be contiguous points = np.require(points, requirements=['C']) vtkpts = _vtk.vtkPoints() vtk_arr = _vtk.numpy_to_vtk(points, deep=deep) vtkpts.SetData(vtk_arr) return vtkpts
def vtk_points(points, deep=True, force_float=False): """Convert numpy array or array-like to a ``vtkPoints`` object. Parameters ---------- points : numpy.ndarray or sequence Points to convert. Should be 1 or 2 dimensional. Accepts a single point or several points. deep : bool, optional Perform a deep copy of the array. Only applicable if ``points`` is a :class:`numpy.ndarray`. force_float : bool, optional Casts the datatype to ``float32`` if points datatype is non-float. Set this to ``False`` to allow non-float types, though this may lead to truncation of intermediate floats when transforming datasets. Returns ------- vtk.vtkPoints The vtkPoints object. Examples -------- >>> import pyvista >>> import numpy as np >>> points = np.random.random((10, 3)) >>> vpoints = pyvista.vtk_points(points) >>> vpoints # doctest:+SKIP (vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40 """ points = np.asanyarray(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') if force_float: if not np.issubdtype(points.dtype, np.floating): warnings.warn( 'Points is not a float type. This can cause issues when ' 'transforming or applying filters. Casting to ' '``np.float32``. Disable this by passing ' '``float_float=False``' ) points = points.astype(np.float32) # check dimensionality if points.ndim == 1: points = points.reshape(-1, 3) elif points.ndim > 2: raise ValueError('Dimension of ``points`` should be 1 or 2, not ' f'{points.ndim}') # verify shape if points.shape[1] != 3: raise ValueError('Points array must contain three values per point. ' f'Shape is {points.shape} and should be (X, 3)') # points must be contiguous points = np.require(points, requirements=['C']) vtkpts = _vtk.vtkPoints() vtk_arr = _vtk.numpy_to_vtk(points, deep=deep) vtkpts.SetData(vtk_arr) return vtkpts
4,544
def test_nifti_spheres_masker_inverse_overlap(): # Test overlapping data in inverse_transform affine = np.eye(4) shape = (5, 5, 5) data = np.random.random(shape + (5,)) fmri_img = nibabel.Nifti1Image(data, affine) # Apply mask image - to not blow things up to MNI space mask_img = new_img_like(fmri_img, np.ones(shape)) seeds = [(0, 0, 0), (2, 2, 2)] # Inverse data inv_data = np.random.random(len(seeds)) overlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=True, mask_img=mask_img).fit() overlapping_masker.inverse_transform(inv_data) overlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=True, mask_img=mask_img).fit() overlap = overlapping_masker.inverse_transform(inv_data) # Test whether overlapping data is averaged assert np.isclose(get_data(overlap)[1,1,1], np.mean(inv_data)) noverlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=False, mask_img=mask_img).fit() noverlapping_masker.inverse_transform(inv_data) noverlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=False, mask_img=mask_img).fit() with pytest.raises(ValueError, match='Overlap detected'): noverlapping_masker.inverse_transform(inv_data)
def test_nifti_spheres_masker_inverse_overlap(): # Test overlapping data in inverse_transform affine = np.eye(4) shape = (5, 5, 5) data = np.random.random(shape + (5,)) fmri_img = nibabel.Nifti1Image(data, affine) # Apply mask image - to not blow things up to MNI space mask_img = new_img_like(fmri_img, np.ones(shape)) seeds = [(0, 0, 0), (2, 2, 2)] # Inverse data inv_data = np.random.random(len(seeds)) overlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=True, mask_img=mask_img).fit() overlapping_masker.inverse_transform(inv_data) overlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=True, mask_img=mask_img).fit() overlap = overlapping_masker.inverse_transform(inv_data) # Test whether overlapping data is averaged assert_array_almost_equal(get_data(overlap)[1, 1, 1], np.mean(inv_data)) noverlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=False, mask_img=mask_img).fit() noverlapping_masker.inverse_transform(inv_data) noverlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=False, mask_img=mask_img).fit() with pytest.raises(ValueError, match='Overlap detected'): noverlapping_masker.inverse_transform(inv_data)
23,275
def cmp_using( eq=None, lt=None, le=None, gt=None, ge=None, require_same_type=True, class_name="Comparable", ): """ Utility function that creates a class with customized equality and ordering methods. The resulting class will have a full set of ordering methods if at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided. :param Optional[callable] eq: `callable` used to evaluate equality of two objects. :param Optional[callable] lt: `callable` used to evaluate whether one object is less than another object. :param Optional[callable] le: `callable` used to evaluate whether one object is less than or equal to another object. :param Optional[callable] gt: `callable` used to evaluate whether one object is greater than another object. :param Optional[callable] ge: `callable` used to evaluate whether one object is greater than or equal to another object. :param bool require_same_type: When `True`, equality and ordering methods will return `NotImplemented` if objects are not of the same type. :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. .. versionadded:: 21.1.0 """ body = { "__slots__": ["value"], "__init__": _make_init(), "_requirements": [], "_is_comparable_to": _is_comparable_to, } # Add operations. num_order_fucntions = 0 has_eq_function = False if eq is not None: has_eq_function = True body["__eq__"] = _make_operator("eq", eq) body["__ne__"] = _make_ne() if lt is not None: num_order_fucntions += 1 body["__lt__"] = _make_operator("lt", lt) if le is not None: num_order_fucntions += 1 body["__le__"] = _make_operator("le", le) if gt is not None: num_order_fucntions += 1 body["__gt__"] = _make_operator("gt", gt) if ge is not None: num_order_fucntions += 1 body["__ge__"] = _make_operator("ge", ge) type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body)) # Add same type requirement. if require_same_type: type_._requirements.append(_check_same_type) # Add total ordering if at least one operation was defined. if 0 < num_order_fucntions < 4: if not has_eq_function: # functools.total_ordering requires __eq__ to be defined, # so raise early error here to keep a nice stack. raise ValueError( "eq must be define is order to complete ordering from " "lt, le, gt, ge." ) type_ = functools.total_ordering(type_) return type_
def cmp_using( eq=None, lt=None, le=None, gt=None, ge=None, require_same_type=True, class_name="Comparable", ): """ Utility function that creates a class with customized equality and ordering methods. The resulting class will have a full set of ordering methods if at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided. :param Optional[callable] eq: `callable` used to evaluate equality of two objects. :param Optional[callable] lt: `callable` used to evaluate whether one object is less than another object. :param Optional[callable] le: `callable` used to evaluate whether one object is less than or equal to another object. :param Optional[callable] gt: `callable` used to evaluate whether one object is greater than another object. :param Optional[callable] ge: `callable` used to evaluate whether one object is greater than or equal to another object. :param bool require_same_type: When `True`, equality and ordering methods will return `NotImplemented` if objects are not of the same type. :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. .. versionadded:: 21.1.0 """ body = { "__slots__": ["value"], "__init__": _make_init(), "_requirements": [], "_is_comparable_to": _is_comparable_to, } # Add operations. num_order_functions = 0 has_eq_function = False if eq is not None: has_eq_function = True body["__eq__"] = _make_operator("eq", eq) body["__ne__"] = _make_ne() if lt is not None: num_order_fucntions += 1 body["__lt__"] = _make_operator("lt", lt) if le is not None: num_order_fucntions += 1 body["__le__"] = _make_operator("le", le) if gt is not None: num_order_fucntions += 1 body["__gt__"] = _make_operator("gt", gt) if ge is not None: num_order_fucntions += 1 body["__ge__"] = _make_operator("ge", ge) type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body)) # Add same type requirement. if require_same_type: type_._requirements.append(_check_same_type) # Add total ordering if at least one operation was defined. if 0 < num_order_fucntions < 4: if not has_eq_function: # functools.total_ordering requires __eq__ to be defined, # so raise early error here to keep a nice stack. raise ValueError( "eq must be define is order to complete ordering from " "lt, le, gt, ge." ) type_ = functools.total_ordering(type_) return type_
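A minimal usage sketch of the cmp_using API documented above (assumes the attrs package, 21.1.0 or newer; the AbsCmp wrapper and the absolute-value comparators are illustrative only):

# Build a comparable wrapper class from eq/lt; the remaining ordering
# methods are filled in by functools.total_ordering, as described above.
import attr

AbsCmp = attr.cmp_using(
    eq=lambda a, b: abs(a) == abs(b),
    lt=lambda a, b: abs(a) < abs(b),
    class_name="AbsCmp",
)

assert AbsCmp(-3.0) == AbsCmp(3.0)   # equality via the supplied eq
assert AbsCmp(1.0) < AbsCmp(-2.0)    # ordering via the supplied lt
assert AbsCmp(-2.0) >= AbsCmp(1.0)   # __ge__ derived by total_ordering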
31,346
def main() -> None: """main function, parses params and runs command functions :return: :rtype: """ ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
def main() -> None: """main function, parses params and runs command functions :return: :rtype: """ ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'picus-peer-list': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
56,302
def main(): """"Initialize settings (not implemented) and create main window/application.""" parser = argparse.ArgumentParser(description = 'OpenShot version ' + info.SETUP['version']) parser.add_argument('-l', '--lang', action='store', help='language code for interface (overrides ' 'preferences and system environment)') parser.add_argument('--list-languages', dest='list_languages', action='store_true', help='List all language codes supported by OpenShot') parser.add_argument('--path', dest='py_path', action='append', help='Additional locations to search for modules ' '(PYTHONPATH). Can be used multiple times.') parser.add_argument('--test-models', dest='modeltest', action='store_true', help="Load Qt's QAbstractItemModelTester into data models " '(requires Qt 5.11+)') parser.add_argument('-d', '--debug', action='store_true', help='Enable debugging output') parser.add_argument('--debug-file', action='store_true', help='Debugging output (logfile only)') parser.add_argument('--debug-console', action='store_true', help='Debugging output (console only)') # Hidden processing for short-form '-d' synonym for --debug parser.add_argument('-V', '--version', action='store_true') parser.add_argument('remain', nargs=argparse.REMAINDER, help=argparse.SUPPRESS) args = parser.parse_args() # Display version and exit (if requested) if args.version: print("OpenShot version %s" % info.SETUP['version']) sys.exit() # Set up debugging log level to requested streams if args.debug or args.debug_file: info.LOG_LEVEL_FILE = 'DEBUG' if args.debug or args.debug_console: info.LOG_LEVEL_CONSOLE = 'DEBUG' if args.list_languages: from classes.language import get_all_languages print("Supported Languages:") for lang in get_all_languages(): print(" {:>12} {}".format(lang[0],lang[1])) sys.exit() if args.py_path: for p in args.py_path: try: if os.path.exists(os.path.realpath(p)): sys.path.insert(0, os.path.realpath(p)) print("Added {} to PYTHONPATH".format(os.path.realpath(p))) else: print("{} does not exist".format(os.path.realpath(p))) except TypeError as ex: print("Bad path {}: {}".format(p, ex)) continue if args.modeltest: info.MODEL_TEST = True # Set default logging rules, if the user didn't if os.getenv('QT_LOGGING_RULES') is None: os.putenv('QT_LOGGING_RULES', 'qt.modeltest.debug=true') if args.lang: if args.lang in info.SUPPORTED_LANGUAGES: info.CMDLINE_LANGUAGE = args.lang else: print("Unsupported language '{}'! (See --list-languages)".format(args.lang)) sys.exit(-1) # Normal startup, print module path and lauch application print("Loaded modules from: %s" % info.PATH) # Create Qt application, pass any unprocessed arguments from classes.app import OpenShotApp argv = [sys.argv[0]] for arg in args.remain: argv.append(arg) app = OpenShotApp(argv) # Run and return result sys.exit(app.run())
def main(): """"Initialize settings (not implemented) and create main window/application.""" parser = argparse.ArgumentParser(description = 'OpenShot version ' + info.SETUP['version']) parser.add_argument('-l', '--lang', action='store', help='language code for interface (overrides ' 'preferences and system environment)') parser.add_argument('--list-languages', dest='list_languages', action='store_true', help='List all language codes supported by OpenShot') parser.add_argument('--path', dest='py_path', action='append', help='Additional locations to search for modules ' '(PYTHONPATH). Can be used multiple times.') parser.add_argument('--test-models', dest='modeltest', action='store_true', help="Load Qt's QAbstractItemModelTester into data models " '(requires Qt 5.11+)') parser.add_argument('-d', '--debug', action='store_true', help='Enable debugging output') parser.add_argument('--debug-file', action='store_true', help='Debugging output (logfile only)') parser.add_argument('--debug-console', action='store_true', help='Debugging output (console only)') parser.add_argument('-V', '--version', action='store_true') parser.add_argument('remain', nargs=argparse.REMAINDER, help=argparse.SUPPRESS) args = parser.parse_args() # Display version and exit (if requested) if args.version: print("OpenShot version %s" % info.SETUP['version']) sys.exit() # Set up debugging log level to requested streams if args.debug or args.debug_file: info.LOG_LEVEL_FILE = 'DEBUG' if args.debug or args.debug_console: info.LOG_LEVEL_CONSOLE = 'DEBUG' if args.list_languages: from classes.language import get_all_languages print("Supported Languages:") for lang in get_all_languages(): print(" {:>12} {}".format(lang[0],lang[1])) sys.exit() if args.py_path: for p in args.py_path: try: if os.path.exists(os.path.realpath(p)): sys.path.insert(0, os.path.realpath(p)) print("Added {} to PYTHONPATH".format(os.path.realpath(p))) else: print("{} does not exist".format(os.path.realpath(p))) except TypeError as ex: print("Bad path {}: {}".format(p, ex)) continue if args.modeltest: info.MODEL_TEST = True # Set default logging rules, if the user didn't if os.getenv('QT_LOGGING_RULES') is None: os.putenv('QT_LOGGING_RULES', 'qt.modeltest.debug=true') if args.lang: if args.lang in info.SUPPORTED_LANGUAGES: info.CMDLINE_LANGUAGE = args.lang else: print("Unsupported language '{}'! (See --list-languages)".format(args.lang)) sys.exit(-1) # Normal startup, print module path and lauch application print("Loaded modules from: %s" % info.PATH) # Create Qt application, pass any unprocessed arguments from classes.app import OpenShotApp argv = [sys.argv[0]] for arg in args.remain: argv.append(arg) app = OpenShotApp(argv) # Run and return result sys.exit(app.run())
40,668
def create_supervised_trainer(model, optimizer, loss_fn, device=None, non_blocking=False, prepare_batch=_prepare_batch, output_transform=lambda x, y, y_pred, loss: loss.item()): """ Factory function for creating a trainer for supervised models. Args: model (`torch.nn.Module`): the model to train. optimizer (`torch.optim.Optimizer`): the optimizer to use. loss_fn (torch.nn loss function): the loss function to use. device (str, optional): device type specification (default: None). Applies to both model and batches. non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value to be assigned to engine's state.output after each iteration. Default is returning loss.item(). If you are going to attach metrics to trainer, you should pass 'output_transform = lambda x, y, y_pred, loss: (y_pred, y,)' Note: `engine.state.output` for this engine is the loss of the processed batch. Returns: Engine: a trainer engine with supervised update function. """ if device: model.to(device) def _update(engine, batch): model.train() optimizer.zero_grad() x, y = prepare_batch(batch, device=device, non_blocking=non_blocking) y_pred = model(x) loss = loss_fn(y_pred, y) loss.backward() optimizer.step() return output_transform(x, y, y_pred, loss) return Engine(_update)
def create_supervised_trainer(model, optimizer, loss_fn, device=None, non_blocking=False, prepare_batch=_prepare_batch, output_transform=lambda x, y, y_pred, loss: loss.item()): """ Factory function for creating a trainer for supervised models. Args: model (`torch.nn.Module`): the model to train. optimizer (`torch.optim.Optimizer`): the optimizer to use. loss_fn (torch.nn loss function): the loss function to use. device (str, optional): device type specification (default: None). Applies to both model and batches. non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`. If you are going to attach metrics to trainer, you should pass 'output_transform = lambda x, y, y_pred, loss: (y_pred, y,)' Note: `engine.state.output` for this engine is the loss of the processed batch. Returns: Engine: a trainer engine with supervised update function. """ if device: model.to(device) def _update(engine, batch): model.train() optimizer.zero_grad() x, y = prepare_batch(batch, device=device, non_blocking=non_blocking) y_pred = model(x) loss = loss_fn(y_pred, y) loss.backward() optimizer.step() return output_transform(x, y, y_pred, loss) return Engine(_update)
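A short usage sketch for the trainer factory above (assumes torch and pytorch-ignite are installed; the toy linear model and random batches are illustrative only):

import torch
from torch import nn
from ignite.engine import create_supervised_trainer

model = nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.MSELoss()

trainer = create_supervised_trainer(model, optimizer, loss_fn)

# Engine.run accepts any iterable of (x, y) batches.
batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(10)]
state = trainer.run(batches, max_epochs=2)
print(state.output)  # loss.item() of the last batch, per the default output_transform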
9,656
def test_get_nanoseconds_from_raw_option(docker_swarm_service): value = docker_swarm_service.get_nanoseconds_from_raw_option(None) assert value is None value = docker_swarm_service.get_nanoseconds_from_raw_option("1m30s500ms") assert value is 90535000000 value = docker_swarm_service.get_nanoseconds_from_raw_option(10000000000) assert value is 10000000000
def test_get_nanoseconds_from_raw_option(docker_swarm_service): value = docker_swarm_service.get_nanoseconds_from_raw_option(None) assert value is None value = docker_swarm_service.get_nanoseconds_from_raw_option("1m30s500ms") assert value is 90500000000 value = docker_swarm_service.get_nanoseconds_from_raw_option(10000000000) assert value is 10000000000
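A standalone arithmetic check of the duration string used above, independent of the module under test: "1m30s500ms" is 60 s + 30 s + 0.5 s = 90.5 s.

# 90.5 s expressed in nanoseconds.
nanoseconds = int((60 + 30 + 0.5) * 1_000_000_000)
assert nanoseconds == 90_500_000_000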
27,047
def downgrade(): """Remove map_index from Log.""" conn = op.get_bind() with op.batch_alter_table("log") as batch_op: if conn.dialect.name == "mssql": for name in _find_map_index_constraints(conn): batch_op.drop_constraint(name) batch_op.drop_column("map_index")
def downgrade(): """Remove map_index from Log.""" conn = op.get_bind() with op.batch_alter_table("log") as batch_op: batch_op.drop_column("map_index", mssql_drop_default=True)
2,035
def test_check_clustering_error(): # Test warning message for continuous values noise = np.random.rand(500) wavelength = np.linspace(0.01, 1, 500) * 1e-6 with pytest(UserWarning): normalized_mutual_info_score(wavelength, noise)
def test_check_clustering_error(): # Test warning message for continuous values noise = np.random.rand(500) wavelength = np.linspace(0.01, 1, 500) * 1e-6 with pytest.warns(UserWarning): normalized_mutual_info_score(wavelength, noise)
36,269
def split_by( adata: AnnData, key: str, groups: Optional[Union[GroupsList, GroupsLists, GroupsDict]] = None, others_key: Optional[str] = None, axis: int = 0, copy: bool = False, ) -> Dict[str, AnnData]: """\ Split adata by obs key. Params ------ adata AnnData object to split. key A key from `.obs` or to `.var` depending on `axis` to use for splitting `adata`. groups Specifies which groups to select from adata `key`. If `None`, `adata` will be split with all values from `key`. It can be a list of groups' names, a list of lists of groups to aggregate, a dict of lists of groups to aggregate. others_key If not `None`, the returned dict will have an additional key with this name and the adata subset object with all groups not specified in `groups` as a value. axis Axis of `adata` to split by `.obs` or `.var` `key`. copy If `True`, all split AnnData objects are copied; otherwise, the returned dict will have views. Returns ------- A dictionay with AnnData objects. If names of the split AnnData objects are not specified (a list of lists in `groups`), they will be created by joining the groups' names. Examples -------- Split by all values in an `.obs` `key`: >>> adata = sc.datasets.pbmc68k_reduced() >>> adatas = sc.get.split_by(adata, 'bulk_labels') >>> adatas {'CD14+ Monocyte': View of AnnData object with n_obs × n_vars = 129 × 765 ..., 'CD19+ B': View of AnnData object with n_obs × n_vars = 95 × 765 ..., ... } Select only specific groups from `.obs` `key`: >>> adata = sc.datasets.pbmc68k_reduced() >>> adatas = sc.get.split_by(adata, 'bulk_labels', ['CD14+ Monocyte', 'CD34+']) >>> adatas {'CD14+ Monocyte': View of AnnData object with n_obs × n_vars = 129 × 765 ..., 'CD34+': View of AnnData object with n_obs × n_vars = 13 × 765 ... } Aggreagte some groups from `.obs` `key`, put all others to `others_key`: >>> adata = sc.datasets.pbmc68k_reduced() >>> adatas = sc.get.split_by(adata, 'bulk_labels', dict(some=['CD14+ Monocyte', 'CD34+']), others_key='others') >>> adatas {'some': View of AnnData object with n_obs × n_vars = 142 × 765 ..., 'others': View of AnnData object with n_obs × n_vars = 558 × 765 ... } """ if key not in adata.obs: raise ValueError(f"No {key} in .obs.") if axis not in (0, 1): raise ValueError("axis should be 0 or 1 only.") attr_by_key = adata.obs[key] if axis == 0 else adata.var[key] select = [slice(None), slice(None)] adatas = {} if groups is None: groups = np.unique(attr_by_key) groups_dict = {} all_values = [] for group in groups: if isinstance(groups, dict): values = groups[group] else: values = group if isinstance(group, list): name = "-".join(str(e) for e in group) else: name = group values = values if isinstance(values, list) else [values] groups_dict[name] = values all_values += values use_others_key = others_key is not None # need to create dict before checking that others_key # is not among the passed groups if use_others_key and others_key in groups_dict: raise ValueError( f"others_key={others_key} coincides with a key in the passed groups." ) for group, values in groups_dict.items(): select[axis] = attr_by_key.isin(values) idx = tuple(select) adatas[group] = adata[idx].copy() if copy else adata[idx] # should be last if use_others_key: mask = ~attr_by_key.isin(all_values) if sum(mask) > 0: select[axis] = mask idx = tuple(select) adatas[others_key] = adata[idx].copy() if copy else adata[idx] return adatas
def split_by( adata: AnnData, key: str, groups: Optional[Union[GroupsList, GroupsLists, GroupsDict]] = None, others_key: Optional[str] = None, axis: int = 0, copy: bool = False, ) -> Dict[str, AnnData]: """\ Split adata by obs key. Params ------ adata AnnData object to split. key A key from `.obs` or to `.var` depending on `axis` to use for splitting `adata`. groups Specifies which groups to select from adata `key`. If `None`, `adata` will be split with all values from `key`. It can be a list of groups' names, a list of lists of groups to aggregate, a dict of lists of groups to aggregate. others_key If not `None`, the returned dict will have an additional key with this name and the adata subset object with all groups not specified in `groups` as a value. axis Axis of `adata` to split by `.obs` or `.var` `key`. copy If `True`, all split AnnData objects are copied; otherwise, the returned dict will have views. Returns ------- A dictionay with AnnData objects. If names of the split AnnData objects are not specified (a list of lists in `groups`), they will be created by joining the groups' names. Examples -------- Split by all values in an `.obs` `key`: >>> adata = sc.datasets.pbmc68k_reduced() >>> adatas = sc.get.split_by(adata, 'bulk_labels') >>> adatas {'CD14+ Monocyte': View of AnnData object with n_obs × n_vars = 129 × 765 ..., 'CD19+ B': View of AnnData object with n_obs × n_vars = 95 × 765 ..., ... } Select only specific groups from `.obs` `key`: >>> adata = sc.datasets.pbmc68k_reduced() >>> adatas = sc.get.split_by(adata, 'bulk_labels', ['CD14+ Monocyte', 'CD34+']) >>> adatas {'CD14+ Monocyte': View of AnnData object with n_obs × n_vars = 129 × 765 ..., 'CD34+': View of AnnData object with n_obs × n_vars = 13 × 765 ... } Aggreagte some groups from `.obs` `key`, put all others to `others_key`: >>> adata = sc.datasets.pbmc68k_reduced() >>> adatas = sc.get.split_by(adata, 'bulk_labels', dict(some=['CD14+ Monocyte', 'CD34+']), others_key='others') >>> adatas {'some': View of AnnData object with n_obs × n_vars = 142 × 765 ..., 'others': View of AnnData object with n_obs × n_vars = 558 × 765 ... } """ if key not in adata.obs: raise ValueError(f"No {key} in .obs.") if axis not in (0, 1): raise ValueError("axis should be 0 or 1 only.") attr_by_key = adata.obs[key] if axis == 0 else adata.var[key] select = [slice(None), slice(None)] adatas = {} if groups is None: groups = attr_by_key.unique() groups_dict = {} all_values = [] for group in groups: if isinstance(groups, dict): values = groups[group] else: values = group if isinstance(group, list): name = "-".join(str(e) for e in group) else: name = group values = values if isinstance(values, list) else [values] groups_dict[name] = values all_values += values use_others_key = others_key is not None # need to create dict before checking that others_key # is not among the passed groups if use_others_key and others_key in groups_dict: raise ValueError( f"others_key={others_key} coincides with a key in the passed groups." ) for group, values in groups_dict.items(): select[axis] = attr_by_key.isin(values) idx = tuple(select) adatas[group] = adata[idx].copy() if copy else adata[idx] # should be last if use_others_key: mask = ~attr_by_key.isin(all_values) if sum(mask) > 0: select[axis] = mask idx = tuple(select) adatas[others_key] = adata[idx].copy() if copy else adata[idx] return adatas
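The grouping logic inside split_by (isin-based selection, with "-"-joined names for aggregated groups) can be sketched without AnnData; a small illustration on a plain pandas Series with made-up labels:

import pandas as pd

labels = pd.Series(["A", "B", "B", "C", "A", "D"])
groups = [["A", "B"], "C"]  # entries may themselves be lists to aggregate

split = {}
for group in groups:
    values = group if isinstance(group, list) else [group]
    name = "-".join(str(v) for v in values)      # aggregated groups get joined names
    split[name] = labels[labels.isin(values)].index.tolist()

print(split)  # {'A-B': [0, 1, 2, 4], 'C': [3]}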
25,796
def load_json_from_zipfile(zf, filepath): with zf.open(filepath, "r") as f: return json.loads(f)
def load_json_from_zipfile(zf, filepath): with zf.open(filepath, "r") as f: return json.load(f)
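The helper above hinges on the json.load / json.loads distinction: json.load reads from a file-like object, while json.loads parses a string (or bytes). A self-contained round-trip check with an in-memory zip:

import io
import json
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("data.json", json.dumps({"ok": True}))

with zipfile.ZipFile(buf) as zf:
    with zf.open("data.json", "r") as f:
        # f is a binary file-like object, so json.load is the right call here.
        assert json.load(f) == {"ok": True}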
27,780
def pytest_pycollect_makemodule(fspath: Path, path: py.path.local, parent) -> "Module": if fspath.name == "__init__.py": pkg: Package = Package.from_parent(parent, fspath=path) return pkg mod: Module = Module.from_parent(parent, fspath=path) return mod
def pytest_pycollect_makemodule(fspath: Path, parent) -> "Module": if fspath.name == "__init__.py": pkg: Package = Package.from_parent(parent, fspath=path) return pkg mod: Module = Module.from_parent(parent, fspath=path) return mod
24,860
def my_func(self, doc_type): """This is a docstring. Args: doc_type (str): Google """ return
def my_func(self, doc_type): """ignores_google_return_none Args: doc_type (str): Google """ return
11,480
def main(generate_input, generate_output): with open(generate_input, "r") as reader: data = json.load(reader) spec_folder = data["specFolder"] sdk_folder = "." result = {} python_tag = data.get('python_tag') if data.get('python_tag') else None package_total = set() for input_readme in data["relatedReadmeMdFiles"]: relative_path_readme = str(Path(spec_folder, input_readme)) _LOGGER.info(f"[CODEGEN]({input_readme})codegen begin") config_file = CONFIG_FILE if 'resource-manager' in input_readme else CONFIG_FILE_DPG config = generate(config_file, sdk_folder, [], relative_path_readme, spec_folder, force_generation=True, python_tag=python_tag) package_names = get_package_names(sdk_folder) _LOGGER.info(f"[CODEGEN]({input_readme})codegen end. [(packages:{str(package_names)})]") for folder_name, package_name in package_names: if package_name in package_total: continue package_total.add(package_name) sdk_code_path = str(Path(sdk_folder, folder_name, package_name)) if package_name not in result: package_entry = {} package_entry["packageName"] = package_name package_entry["path"] = [folder_name] package_entry["readmeMd"] = [input_readme] package_entry["tagIsStable"] = not judge_tag_preview(sdk_code_path) result[package_name] = package_entry else: result[package_name]["path"].append(folder_name) result[package_name]["readmeMd"].append(input_readme) # Generate some necessary file for new service init_new_service(package_name, folder_name) # Update metadata try: update_servicemetadata(sdk_folder, data, config, folder_name, package_name, spec_folder, input_readme) except Exception as e: _LOGGER.info(str(e)) # Setup package locally check_call( f"pip install --ignore-requires-python -e {sdk_code_path}", shell=True, ) # remove duplicates for value in result.values(): value["path"] = list(set(value["path"])) value["readmeMd"] = list(set(value["readmeMd"])) with open(generate_output, "w") as writer: json.dump(result, writer)
def main(generate_input, generate_output): with open(generate_input, "r") as reader: data = json.load(reader) spec_folder = data["specFolder"] sdk_folder = "." result = {} python_tag = data.get('python_tag') package_total = set() for input_readme in data["relatedReadmeMdFiles"]: relative_path_readme = str(Path(spec_folder, input_readme)) _LOGGER.info(f"[CODEGEN]({input_readme})codegen begin") config_file = CONFIG_FILE if 'resource-manager' in input_readme else CONFIG_FILE_DPG config = generate(config_file, sdk_folder, [], relative_path_readme, spec_folder, force_generation=True, python_tag=python_tag) package_names = get_package_names(sdk_folder) _LOGGER.info(f"[CODEGEN]({input_readme})codegen end. [(packages:{str(package_names)})]") for folder_name, package_name in package_names: if package_name in package_total: continue package_total.add(package_name) sdk_code_path = str(Path(sdk_folder, folder_name, package_name)) if package_name not in result: package_entry = {} package_entry["packageName"] = package_name package_entry["path"] = [folder_name] package_entry["readmeMd"] = [input_readme] package_entry["tagIsStable"] = not judge_tag_preview(sdk_code_path) result[package_name] = package_entry else: result[package_name]["path"].append(folder_name) result[package_name]["readmeMd"].append(input_readme) # Generate some necessary file for new service init_new_service(package_name, folder_name) # Update metadata try: update_servicemetadata(sdk_folder, data, config, folder_name, package_name, spec_folder, input_readme) except Exception as e: _LOGGER.info(str(e)) # Setup package locally check_call( f"pip install --ignore-requires-python -e {sdk_code_path}", shell=True, ) # remove duplicates for value in result.values(): value["path"] = list(set(value["path"])) value["readmeMd"] = list(set(value["readmeMd"])) with open(generate_output, "w") as writer: json.dump(result, writer)
7,373
def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """ Masked normalized cross-correlation between arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `‘full’` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ if mode not in {'full', 'same'}: raise ValueError(f"Correlation mode '{mode}' is not valid.") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError("complex-valued arr1, arr2 are not supported") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. 
multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out
def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """ Masked normalized cross-correlation between arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `‘full’` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between the two arrays. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ if mode not in {'full', 'same'}: raise ValueError(f"Correlation mode '{mode}' is not valid.") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError("complex-valued arr1, arr2 are not supported") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. 
multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out
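The shape bookkeeping noted in the comments above (transforming only some axes with a padded size) can be checked in isolation; a small sketch assuming SciPy 1.4+ for scipy.fft:

import numpy as np
from scipy import fft

# Transforming axes (0, 1) of a (2, 3, 7) array with s=(4, 4) pads only those
# axes, giving a (4, 4, 7) spectrum, as stated in the comment above.
arr = np.zeros((2, 3, 7))
arr_fft = fft.fftn(arr, s=(4, 4), axes=(0, 1))
print(arr_fft.shape)  # (4, 4, 7)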
36,228
def do_uninstall( packages=False, editable_packages=False, three=None, python=False, system=False, lock=False, all_dev=False, all=False, keep_outdated=False, pypi_mirror=None, ctx=None ): from .environments import PIPENV_USE_SYSTEM from .vendor.requirementslib.models.requirements import Requirement from .vendor.packaging.utils import canonicalize_name # Automatically use an activated virtualenv. if PIPENV_USE_SYSTEM: system = True # Ensure that virtualenv is available. # TODO: We probably shouldn't ensure a project exists if the outcome will be to just # install things in order to remove them... maybe tell the user to install first? ensure_project(three=three, python=python, pypi_mirror=pypi_mirror) # Un-install all dependencies, if --all was provided. if not any([packages, editable_packages, all_dev, all]): raise exceptions.MissingParameter( crayons.red("No package provided!"), ctx=ctx, param_type="parameter", ) editable_pkgs = [ Requirement.from_line("-e {0}".format(p)).name for p in editable_packages if p ] packages += editable_pkgs package_names = set(p for p in packages if p) package_map = { canonicalize_name(p): p for p in packages if p } installed_package_names = project.installed_package_names # Intelligently detect if --dev should be used or not. lockfile_packages = set() if project.lockfile_exists: project_pkg_names = project.lockfile_package_names else: project_pkg_names = project.pipfile_package_names pipfile_remove = True # Uninstall [dev-packages], if --dev was provided. if all_dev: if "dev-packages" not in project.parsed_pipfile and not project_pkg_names["dev"]: click.echo( crayons.normal( "No {0} to uninstall.".format(crayons.red("[dev-packages]")), bold=True, ) ) return click.echo( crayons.normal( fix_utf8("Un-installing {0}…".format(crayons.red("[dev-packages]"))), bold=True ) ) package_names = set(project_pkg_names["dev"]) - set(project_pkg_names["default"]) # Remove known "bad packages" from the list. bad_pkgs = get_canonical_names(BAD_PACKAGES) ignored_packages = bad_pkgs & set(list(package_map.keys())) for ignored_pkg in ignored_packages: if environments.is_verbose(): click.echo("Ignoring {0}.".format(ignored_pkg), err=True) del package_names[package_map[ignored_pkg]] used_packages = project_pkg_names["combined"] & installed_package_names failure = False if all: click.echo( crayons.normal( fix_utf8("Un-installing all {0} and {1}…".format( crayons.red("[dev-packages]"), crayons.red("[packages]"), )), bold=True ) ) do_purge(bare=False, allow_global=system) sys.exit(0) selected_pkg_map = { canonicalize_name(p): p for p in package_names } packages_to_remove = [ p for normalized, p in selected_pkg_map.items() if normalized in (used_packages - bad_pkgs) ] pip_path = None for normalized, package_name in selected_pkg_map.items(): click.echo( crayons.white( fix_utf8("Uninstalling {0}…".format(package_name)), bold=True ) ) # Uninstall the package. 
if package_name in packages_to_remove: with project.environment.activated(): if pip_path is None: pip_path = which_pip(allow_global=system) cmd = [pip_path, "uninstall", package_name, "-y"] c = run_command(cmd) click.echo(crayons.blue(c.out)) if c.return_code != 0: failure = True if not failure and pipfile_remove: in_packages = project.get_package_name_in_pipfile(package_name, dev=False) in_dev_packages = project.get_package_name_in_pipfile( package_name, dev=True ) if normalized in lockfile_packages: click.echo("{0} {1} {2} {3}".format( crayons.blue("Removing"), crayons.green(package_name), crayons.blue("from"), crayons.white(fix_utf8("Pipfile.lock…"))) ) lockfile = project.get_or_create_lockfile() if normalized in lockfile.default: del lockfile.default[normalized] if normalized in lockfile.develop: del lockfile.develop[normalized] lockfile.write() if not (in_dev_packages or in_packages): if normalized in lockfile_packages: continue click.echo( "No package {0} to remove from Pipfile.".format( crayons.green(package_name) ) ) continue click.echo( fix_utf8("Removing {0} from Pipfile…".format(crayons.green(package_name))) ) # Remove package from both packages and dev-packages. if in_dev_packages: project.remove_package_from_pipfile(package_name, dev=True) if in_packages: project.remove_package_from_pipfile(package_name, dev=False) if lock: do_lock(system=system, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror) sys.exit(int(failure))
def do_uninstall( packages=False, editable_packages=False, three=None, python=False, system=False, lock=False, all_dev=False, all=False, keep_outdated=False, pypi_mirror=None, ctx=None ): from .environments import PIPENV_USE_SYSTEM from .vendor.requirementslib.models.requirements import Requirement from .vendor.packaging.utils import canonicalize_name # Automatically use an activated virtualenv. if PIPENV_USE_SYSTEM: system = True # Ensure that virtualenv is available. # TODO: We probably shouldn't ensure a project exists if the outcome will be to just # install things in order to remove them... maybe tell the user to install first? ensure_project(three=three, python=python, pypi_mirror=pypi_mirror) # Un-install all dependencies, if --all was provided. if not any([packages, editable_packages, all_dev, all]): raise exceptions.MissingParameter( crayons.red("No package provided!"), ctx=ctx, param_type="parameter", ) editable_pkgs = [ Requirement.from_line("-e {0}".format(p)).name for p in editable_packages if p ] packages += editable_pkgs package_names = set(p for p in packages if p) package_map = { canonicalize_name(p): p for p in packages if p } installed_package_names = project.installed_package_names # Intelligently detect if --dev should be used or not. lockfile_packages = set() if project.lockfile_exists: project_pkg_names = project.lockfile_package_names else: project_pkg_names = project.pipfile_package_names pipfile_remove = True # Uninstall [dev-packages], if --dev was provided. if all_dev: if "dev-packages" not in project.parsed_pipfile and not project_pkg_names["dev"]: click.echo( crayons.normal( "No {0} to uninstall.".format(crayons.red("[dev-packages]")), bold=True, ) ) return click.echo( crayons.normal( fix_utf8("Un-installing {0}…".format(crayons.red("[dev-packages]"))), bold=True ) ) package_names = set(project_pkg_names["dev"]) - set(project_pkg_names["default"]) # Remove known "bad packages" from the list. bad_pkgs = get_canonical_names(BAD_PACKAGES) ignored_packages = bad_pkgs & set(list(package_map.keys())) for ignored_pkg in ignored_packages: if environments.is_verbose(): click.echo("Ignoring {0}.".format(ignored_pkg), err=True) package_names.discard(package_map[ignored_pkg]) used_packages = project_pkg_names["combined"] & installed_package_names failure = False if all: click.echo( crayons.normal( fix_utf8("Un-installing all {0} and {1}…".format( crayons.red("[dev-packages]"), crayons.red("[packages]"), )), bold=True ) ) do_purge(bare=False, allow_global=system) sys.exit(0) selected_pkg_map = { canonicalize_name(p): p for p in package_names } packages_to_remove = [ p for normalized, p in selected_pkg_map.items() if normalized in (used_packages - bad_pkgs) ] pip_path = None for normalized, package_name in selected_pkg_map.items(): click.echo( crayons.white( fix_utf8("Uninstalling {0}…".format(package_name)), bold=True ) ) # Uninstall the package. 
if package_name in packages_to_remove: with project.environment.activated(): if pip_path is None: pip_path = which_pip(allow_global=system) cmd = [pip_path, "uninstall", package_name, "-y"] c = run_command(cmd) click.echo(crayons.blue(c.out)) if c.return_code != 0: failure = True if not failure and pipfile_remove: in_packages = project.get_package_name_in_pipfile(package_name, dev=False) in_dev_packages = project.get_package_name_in_pipfile( package_name, dev=True ) if normalized in lockfile_packages: click.echo("{0} {1} {2} {3}".format( crayons.blue("Removing"), crayons.green(package_name), crayons.blue("from"), crayons.white(fix_utf8("Pipfile.lock…"))) ) lockfile = project.get_or_create_lockfile() if normalized in lockfile.default: del lockfile.default[normalized] if normalized in lockfile.develop: del lockfile.develop[normalized] lockfile.write() if not (in_dev_packages or in_packages): if normalized in lockfile_packages: continue click.echo( "No package {0} to remove from Pipfile.".format( crayons.green(package_name) ) ) continue click.echo( fix_utf8("Removing {0} from Pipfile…".format(crayons.green(package_name))) ) # Remove package from both packages and dev-packages. if in_dev_packages: project.remove_package_from_pipfile(package_name, dev=True) if in_packages: project.remove_package_from_pipfile(package_name, dev=False) if lock: do_lock(system=system, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror) sys.exit(int(failure))
38,569
def uniquify_point_set( points: np.ndarray[Any, np.dtype[np.float64]], tol: float = 1e-8 ) -> Tuple[ np.ndarray[Any, np.dtype[np.float64]], np.ndarray[Any, np.dtype[np.int64]], np.ndarray[Any, np.dtype[np.int64]], ]: """Uniquify a set of points so that no two sets of points are closer than a distance tol from each other. This function is partially overlapping by the function unique_columns_tol, but the latter is more general, as it provides fast treatment of integer arrays. FIXME: It should be possible to unify the two implementations, however, more experience is needed before doing so. Parameters: mat (np.ndarray, nd x n_pts): Columns to be uniquified. tol (double, optional): Tolerance for when columns are considered equal. Should be seen in connection with distance between the points in the points (due to rounding errors). Defaults to 1e-8. Returns: np.ndarray: Unique columns. new_2_old: Index of which points that are preserved old_2_new: Index of the representation of old points in the reduced list. """ # The implementation uses Scipy's KDTree implementation to efficiently get # the distance between points. num_p = points.shape[1] # Transpose needed to comply with KDTree. tree = KDTree(points.T) # Get all pairs of points closer than the tolerance. pairs = tree.query_pairs(tol, output_type="ndarray") if pairs.size == 0: # No points were find, we can return return points, np.arange(num_p), np.arange(num_p) # Process information to arive at a unique point set. This is technical, # since we need to deal with cases where more than two points coincide # (example: if the points p1, p2 and p3 coincide, they will be identified # either by the pairs {(i1, i2), (i1, i3)}, by {(i1, i2), (i2, i3)}, # or by {(i1, i3), (i2, i3)}). # Sort the index pairs of identical points for simpler identification. # NOTE: pairs, as returned by KDTree, is a num_pairs x 2 array, thus # sorting the pairs should be along axis 1. pair_arr = np.sort(pairs, axis=1) # Sort the pairs along axis=1. The lexsort will make the sorting first # according to pair_arr[:, 0] (the point with the lowest index in each # pair), and then according to the second index (pair_arr[:, 1]). The # result will be a lexiograpically ordered array. # Also note the transport back to a 2 x num_pairs array. sorted_arr = pair_arr[np.lexsort((pair_arr[:, 1], pair_arr[:, 0]))].T # Find points that are both in the first and second row. This will identify # triplets identified by pairs {(i1, i2), (i2, i3)} as described above. duplicate = np.isin(sorted_arr[0], sorted_arr[1]) # Array with duplicates of the type {(i1, i2), (i1, i3)} removed. reduced_arr = sorted_arr[:, np.logical_not(duplicate)] # Also identify points that are not involved in any pairs, these should be # included in the unique set. Append these to the point array. not_in_pairs = np.setdiff1d(np.arange(points.shape[1]), pair_arr.ravel()) reduced_arr = np.hstack((reduced_arr, np.tile(not_in_pairs, (2, 1)))) # The array can still contain pairs of type {(i1, i2), (i1, i3)} and # {(i1, i3), (i1, i3)}. These can be identified by a unique on the first # row. ia = np.unique(reduced_arr[0]) # Make a mapping from all points to the reduced set. ib = np.arange(num_p) _, inv_map = np.unique(reduced_arr[0], return_inverse=True) ib[reduced_arr[0]] = inv_map ib[reduced_arr[1]] = ib[reduced_arr[0]] # Uniquify points. upoints = points[:, ia] # Done. return upoints, ia, ib
def uniquify_point_set( points: np.ndarray[Any, np.dtype[np.float64]], tol: float = 1e-8 ) -> Tuple[ np.ndarray[Any, np.dtype[np.float64]], np.ndarray[Any, np.dtype[np.int64]], np.ndarray[Any, np.dtype[np.int64]], ]: """Uniquify a set of points so that no two sets of points are closer than a distance tol from each other. This function is partially overlapping by the function unique_columns_tol, but the latter is more general, as it provides fast treatment of integer arrays. FIXME: It should be possible to unify the two implementations, however, more experience is needed before doing so. Parameters: mat (np.ndarray, nd x n_pts): Columns to be uniquified. tol (double, optional): Tolerance for when columns are considered equal. Should be seen in connection with distance between the points in the point set (due to rounding errors). Defaults to 1e-8. Returns: np.ndarray: Unique columns. new_2_old: Index of which points that are preserved old_2_new: Index of the representation of old points in the reduced list. """ # The implementation uses Scipy's KDTree implementation to efficiently get # the distance between points. num_p = points.shape[1] # Transpose needed to comply with KDTree. tree = KDTree(points.T) # Get all pairs of points closer than the tolerance. pairs = tree.query_pairs(tol, output_type="ndarray") if pairs.size == 0: # No points were find, we can return return points, np.arange(num_p), np.arange(num_p) # Process information to arive at a unique point set. This is technical, # since we need to deal with cases where more than two points coincide # (example: if the points p1, p2 and p3 coincide, they will be identified # either by the pairs {(i1, i2), (i1, i3)}, by {(i1, i2), (i2, i3)}, # or by {(i1, i3), (i2, i3)}). # Sort the index pairs of identical points for simpler identification. # NOTE: pairs, as returned by KDTree, is a num_pairs x 2 array, thus # sorting the pairs should be along axis 1. pair_arr = np.sort(pairs, axis=1) # Sort the pairs along axis=1. The lexsort will make the sorting first # according to pair_arr[:, 0] (the point with the lowest index in each # pair), and then according to the second index (pair_arr[:, 1]). The # result will be a lexiograpically ordered array. # Also note the transport back to a 2 x num_pairs array. sorted_arr = pair_arr[np.lexsort((pair_arr[:, 1], pair_arr[:, 0]))].T # Find points that are both in the first and second row. This will identify # triplets identified by pairs {(i1, i2), (i2, i3)} as described above. duplicate = np.isin(sorted_arr[0], sorted_arr[1]) # Array with duplicates of the type {(i1, i2), (i1, i3)} removed. reduced_arr = sorted_arr[:, np.logical_not(duplicate)] # Also identify points that are not involved in any pairs, these should be # included in the unique set. Append these to the point array. not_in_pairs = np.setdiff1d(np.arange(points.shape[1]), pair_arr.ravel()) reduced_arr = np.hstack((reduced_arr, np.tile(not_in_pairs, (2, 1)))) # The array can still contain pairs of type {(i1, i2), (i1, i3)} and # {(i1, i3), (i1, i3)}. These can be identified by a unique on the first # row. ia = np.unique(reduced_arr[0]) # Make a mapping from all points to the reduced set. ib = np.arange(num_p) _, inv_map = np.unique(reduced_arr[0], return_inverse=True) ib[reduced_arr[0]] = inv_map ib[reduced_arr[1]] = ib[reduced_arr[0]] # Uniquify points. upoints = points[:, ia] # Done. return upoints, ia, ib
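The de-duplication above relies on KDTree.query_pairs returning index pairs of points closer than tol; a tiny standalone check of that step (using scipy.spatial.cKDTree and row-wise toy points, unlike the nd x n_pts layout above):

import numpy as np
from scipy.spatial import cKDTree

pts = np.array([[0.0, 0.0], [0.0, 1e-9], [1.0, 1.0]])  # first two points coincide up to tol
pairs = cKDTree(pts).query_pairs(r=1e-8, output_type="ndarray")
print(pairs)  # [[0 1]] -- the (near-)duplicate pair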
5,718
def leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): """ Minimize the sum of squares of a set of equations. :: x = arg min(sum(func(y)**2,axis=0)) y Parameters ---------- func : callable Should take at least one (possibly length N vector) argument and returns M floating point numbers. It must not return NaNs or fitting might fail. M must be greater than or equal to N (M >= N). x0 : ndarray The starting estimate for the minimization. args : tuple, optional Any extra arguments to func are placed in this tuple. Dfun : callable, optional A function or method to compute the Jacobian of func with derivatives across the rows. If this is None, the Jacobian will be estimated. full_output : bool, optional non-zero to return all optional outputs. col_deriv : bool, optional non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float, optional Relative error desired in the sum of squares. xtol : float, optional Relative error desired in the approximate solution. gtol : float, optional Orthogonality desired between the function vector and the columns of the Jacobian. maxfev : int, optional The maximum number of calls to the function. If `Dfun` is provided, then the default `maxfev` is 100*(N+1) where N is the number of elements in x0, otherwise the default `maxfev` is 200*(N+1). epsfcn : float, optional A variable used in determining a suitable step length for the forward- difference approximation of the Jacobian (for Dfun=None). Normally the actual step length will be sqrt(epsfcn)*x If epsfcn is less than the machine precision, it is assumed that the relative errors are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). cov_x : ndarray The inverse of the Hessian. `fjac` and `ipvt` are used to construct an estimate of the Hessian. A value of None indicates a singular matrix, which means the curvature in parameters `x` is numerically flat. To obtain the covariance matrix of the parameters `x`, `cov_x` must be multiplied by the variance of the residuals -- see curve_fit. infodict : dict a dictionary of optional outputs with the keys: ``nfev`` The number of function calls ``fvec`` The function evaluated at the output ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. ``qtf`` The vector (transpose(q) * fvec). mesg : str A string message giving information about the cause of failure. ier : int An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable 'mesg' gives more information. See Also -------- least_squares : Newer interface to solve nonlinear least-squares problems with bounds on the variables. 
See ``method=='lm'`` in particular. Notes ----- "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. cov_x is a Jacobian approximation to the Hessian of the least squares objective function. This approximation assumes that the objective function is based on the difference between some observed target data (ydata) and a (non-linear) function of the parameters `f(xdata, params)` :: func(params) = ydata - f(xdata, params) so that the objective function is :: min sum((ydata - f(xdata, params))**2, axis=0) params The solution, `x`, is always a 1-D array, regardless of the shape of `x0`, or whether `x0` is a scalar. Examples -------- >>> from scipy.optimize import leastsq >>> def func(x): ... return 2*(x-3)**2+1 >>> leastsq(func, 0) (array([2.99999999]), 1) """ x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) m = shape[0] if n > m: raise TypeError('Improper input: func input vector length N=%s must ' 'not exceed func output vector length M=%s' % (n, m)) if epsfcn is None: epsfcn = finfo(dtype).eps if Dfun is None: if maxfev == 0: maxfev = 200*(n + 1) retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol, maxfev, epsfcn, factor, diag) else: if col_deriv: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) else: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) if maxfev == 0: maxfev = 100 * (n + 1) retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv, ftol, xtol, gtol, maxfev, factor, diag) errors = {0: ["Improper input parameters.", TypeError], 1: ["Both actual and predicted relative reductions " "in the sum of squares\n are at most %f" % ftol, None], 2: ["The relative error between two consecutive " "iterates is at most %f" % xtol, None], 3: ["Both actual and predicted relative reductions in " "the sum of squares\n are at most %f and the " "relative error between two consecutive " "iterates is at \n most %f" % (ftol, xtol), None], 4: ["The cosine of the angle between func(x) and any " "column of the\n Jacobian is at most %f in " "absolute value" % gtol, None], 5: ["Number of calls to function has reached " "maxfev = %d." % maxfev, ValueError], 6: ["ftol=%f is too small, no further reduction " "in the sum of squares\n is possible." % ftol, ValueError], 7: ["xtol=%f is too small, no further improvement in " "the approximate\n solution is possible." % xtol, ValueError], 8: ["gtol=%f is too small, func(x) is orthogonal to the " "columns of\n the Jacobian to machine " "precision." % gtol, ValueError]} # The FORTRAN return value (possible return values are >= 0 and <= 8) info = retval[-1] if full_output: cov_x = None if info in LEASTSQ_SUCCESS: perm = take(eye(n), retval[1]['ipvt'] - 1, 0) r = triu(transpose(retval[1]['fjac'])[:n, :]) R = dot(r, perm) try: cov_x = inv(dot(transpose(R), R)) except (LinAlgError, ValueError): pass return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) else: if info in LEASTSQ_FAILURE: warnings.warn(errors[info][0], RuntimeWarning) elif info == 0: raise errors[info][1](errors[info][0]) return retval[0], info
def leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): """ Minimize the sum of squares of a set of equations. :: x = arg min(sum(func(y)**2,axis=0)) y Parameters ---------- func : callable Should take at least one (possibly length N vector) argument and returns M floating point numbers. It must not return NaNs or fitting might fail. ``M`` must be greater than or equal to ``N``. x0 : ndarray The starting estimate for the minimization. args : tuple, optional Any extra arguments to func are placed in this tuple. Dfun : callable, optional A function or method to compute the Jacobian of func with derivatives across the rows. If this is None, the Jacobian will be estimated. full_output : bool, optional non-zero to return all optional outputs. col_deriv : bool, optional non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float, optional Relative error desired in the sum of squares. xtol : float, optional Relative error desired in the approximate solution. gtol : float, optional Orthogonality desired between the function vector and the columns of the Jacobian. maxfev : int, optional The maximum number of calls to the function. If `Dfun` is provided, then the default `maxfev` is 100*(N+1) where N is the number of elements in x0, otherwise the default `maxfev` is 200*(N+1). epsfcn : float, optional A variable used in determining a suitable step length for the forward- difference approximation of the Jacobian (for Dfun=None). Normally the actual step length will be sqrt(epsfcn)*x If epsfcn is less than the machine precision, it is assumed that the relative errors are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). cov_x : ndarray The inverse of the Hessian. `fjac` and `ipvt` are used to construct an estimate of the Hessian. A value of None indicates a singular matrix, which means the curvature in parameters `x` is numerically flat. To obtain the covariance matrix of the parameters `x`, `cov_x` must be multiplied by the variance of the residuals -- see curve_fit. infodict : dict a dictionary of optional outputs with the keys: ``nfev`` The number of function calls ``fvec`` The function evaluated at the output ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. ``qtf`` The vector (transpose(q) * fvec). mesg : str A string message giving information about the cause of failure. ier : int An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable 'mesg' gives more information. See Also -------- least_squares : Newer interface to solve nonlinear least-squares problems with bounds on the variables. 
See ``method=='lm'`` in particular. Notes ----- "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. cov_x is a Jacobian approximation to the Hessian of the least squares objective function. This approximation assumes that the objective function is based on the difference between some observed target data (ydata) and a (non-linear) function of the parameters `f(xdata, params)` :: func(params) = ydata - f(xdata, params) so that the objective function is :: min sum((ydata - f(xdata, params))**2, axis=0) params The solution, `x`, is always a 1-D array, regardless of the shape of `x0`, or whether `x0` is a scalar. Examples -------- >>> from scipy.optimize import leastsq >>> def func(x): ... return 2*(x-3)**2+1 >>> leastsq(func, 0) (array([2.99999999]), 1) """ x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) m = shape[0] if n > m: raise TypeError('Improper input: func input vector length N=%s must ' 'not exceed func output vector length M=%s' % (n, m)) if epsfcn is None: epsfcn = finfo(dtype).eps if Dfun is None: if maxfev == 0: maxfev = 200*(n + 1) retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol, maxfev, epsfcn, factor, diag) else: if col_deriv: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) else: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) if maxfev == 0: maxfev = 100 * (n + 1) retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv, ftol, xtol, gtol, maxfev, factor, diag) errors = {0: ["Improper input parameters.", TypeError], 1: ["Both actual and predicted relative reductions " "in the sum of squares\n are at most %f" % ftol, None], 2: ["The relative error between two consecutive " "iterates is at most %f" % xtol, None], 3: ["Both actual and predicted relative reductions in " "the sum of squares\n are at most %f and the " "relative error between two consecutive " "iterates is at \n most %f" % (ftol, xtol), None], 4: ["The cosine of the angle between func(x) and any " "column of the\n Jacobian is at most %f in " "absolute value" % gtol, None], 5: ["Number of calls to function has reached " "maxfev = %d." % maxfev, ValueError], 6: ["ftol=%f is too small, no further reduction " "in the sum of squares\n is possible." % ftol, ValueError], 7: ["xtol=%f is too small, no further improvement in " "the approximate\n solution is possible." % xtol, ValueError], 8: ["gtol=%f is too small, func(x) is orthogonal to the " "columns of\n the Jacobian to machine " "precision." % gtol, ValueError]} # The FORTRAN return value (possible return values are >= 0 and <= 8) info = retval[-1] if full_output: cov_x = None if info in LEASTSQ_SUCCESS: perm = take(eye(n), retval[1]['ipvt'] - 1, 0) r = triu(transpose(retval[1]['fjac'])[:n, :]) R = dot(r, perm) try: cov_x = inv(dot(transpose(R), R)) except (LinAlgError, ValueError): pass return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) else: if info in LEASTSQ_FAILURE: warnings.warn(errors[info][0], RuntimeWarning) elif info == 0: raise errors[info][1](errors[info][0]) return retval[0], info
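The cov_x note in the docstring is easiest to see with a small fit. A sketch over invented data, scaling cov_x by the residual variance exactly as the docstring suggests:

import numpy as np
from scipy.optimize import leastsq

# Invented data roughly following y = 2*x + 1.
xdata = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
ydata = np.array([1.1, 2.9, 5.2, 6.8, 9.1])

def residuals(params, x, y):
    a, b = params
    return y - (a * x + b)

popt, cov_x, infodict, mesg, ier = leastsq(
    residuals, x0=[1.0, 0.0], args=(xdata, ydata), full_output=True)

# Scale cov_x by the residual variance to approximate the covariance of (a, b).
dof = len(xdata) - len(popt)
pcov = cov_x * (residuals(popt, xdata, ydata) ** 2).sum() / dof
print(popt, ier, pcov)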
41,241
def test_leaves_big(): a = cirq.NamedQubit('a') circuit = cirq.Circuit([cirq.Moment([cirq.Z(a) ** 0.1])]) cirq.testing.assert_same_circuits(cirq.drop_negligible_operations(circuit, atol=0.001), circuit)
def test_leaves_big(): a = cirq.NamedQubit('a') circuit = cirq.Circuit(cirq.Moment(cirq.Z(a) ** 0.1)) cirq.testing.assert_same_circuits(cirq.drop_negligible_operations(circuit, atol=0.001), circuit)
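For contrast with the test above, a hedged sketch of the complementary case: a rotation far below atol should be dropped (the exact shape of the result depends on the cirq version, so only the general behaviour is claimed).

import cirq

a = cirq.NamedQubit('a')
tiny = cirq.Circuit(cirq.Moment(cirq.Z(a) ** 0.000001))
cleaned = cirq.drop_negligible_operations(tiny, atol=0.001)
# The near-identity Z rotation should be removed; whether an empty moment
# remains depends on how the transformer treats moment structure.
print(cleaned)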
29,036
def get_agent_dest_path(host: VictimHost, options: Mapping[str, Any]) -> Path: if host.os["type"] == "windows": path = PureWindowsPath(options["dropper_target_path_win_64"]) else: path = PurePosixPath(options["dropper_target_path_linux"]) return _add_random_suffix(path)
def get_agent_dest_path(host: VictimHost, options: Mapping[str, Any]) -> PurePath: if host.os["type"] == "windows": path = PureWindowsPath(options["dropper_target_path_win_64"]) else: path = PurePosixPath(options["dropper_target_path_linux"]) return _add_random_suffix(path)
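The annotation change above works because PureWindowsPath and PurePosixPath are both PurePath subclasses but never Path instances; a small standalone check (paths invented):

from pathlib import Path, PurePath, PurePosixPath, PureWindowsPath

win = PureWindowsPath(r"C:\Windows\temp\agent.exe")
nix = PurePosixPath("/tmp/agent")

assert isinstance(win, PurePath) and isinstance(nix, PurePath)
assert not isinstance(win, Path) and not isinstance(nix, Path)
# Neither pure flavour is a Path, so annotating the helper as -> PurePath
# describes what both branches can actually return.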
32,411
def generate_dbotscore(response: Dict) -> List: """Creates CommandResult object based on the contents of 'response' argument and provides DBotScore objects. Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- List A list of CommandResults objects. """ data = response.get('data', {}) analysis = data.get('analysis', {}) main_object = analysis.get('content', {}).get('mainObject', {}) submission_type = main_object.get('type') submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold() reputation_map = { "shared": Common.DBotScore.NONE, "unknown": Common.DBotScore.NONE, "whitelisted": Common.DBotScore.GOOD, "malicious": Common.DBotScore.BAD, "suspicious": Common.DBotScore.SUSPICIOUS } returned_data = [] main_entity = None main_entity_type = None # Add the hash or URL first if submission_type == 'hash': hashes = main_object.get('hashes', {}) info = main_object.get('info', {}) file_type = info.get('file') exif = info.get('exif', {}) main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5') main_entity_type = FeedIndicatorType.File dbot_score = Common.DBotScore( indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'), indicator_type=DBotScoreType.FILE, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) returned_data.append(CommandResults( indicator=Common.File( dbot_score=dbot_score, md5=hashes.get('md5'), sha1=hashes.get('sha1'), sha256=hashes.get('sha256'), file_type=file_type, associated_file_names=exif.get('OriginalFileName') ) )) else: main_entity = main_object.get('url') main_entity_type = FeedIndicatorType.URL url_outputs = { 'Data': main_object.get('url') } dbot_score = Common.DBotScore( indicator=main_object.get('url'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) if dbot_score.score >= 2: url_outputs['Malicious'] = { 'Vendor': 'ANYRUN', 'Description': threat_text } returned_data.append(CommandResults( outputs_prefix='URL', outputs_key_field=['Data'], outputs=url_outputs, indicator=Common.URL( url=main_object.get('url'), dbot_score=dbot_score, ) )) # Check if network information is available in the report if 'network' in data: network_data = data.get('network') # Then add all the network-related indicators - 'connections' if 'connections' in network_data: connections = network_data.get('connections') for current_connection in connections: reputation = current_connection.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_connection.get('IP'), indicator_type=DBotScoreType.IP, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_connection.get('IP'), entity_b_type=FeedIndicatorType.IP, brand="ANYRUN" )] ip_indicator = Common.IP( ip=current_connection.get('IP'), asn=current_connection.get('ASN'), port=current_connection.get('Port'), geo_country=current_connection.get('Country'), dbot_score=current_dbot_score, relationships=relationships ) if current_connection.get('IP') not in [ x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP) ]: returned_data.append(CommandResults( 
readable_output=tableToMarkdown( f"{current_connection.get('IP')}", [{ "Description": f"This IP was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=ip_indicator, relationships=relationships )) # Then add all the network-related indicators - 'dnsRequests' if 'dnsRequests' in network_data: for current_dnsRequests in network_data.get('dnsRequests'): reputation = current_dnsRequests.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_dnsRequests.get('Domain'), indicator_type=DBotScoreType.DOMAIN, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_dnsRequests.get('Domain'), entity_b_type=FeedIndicatorType.Domain, brand="ANYRUN" )] if "IP" in current_dnsRequests: for ip in current_dnsRequests.get('IP', []): relationships.append( EntityRelationship( name=EntityRelationship.Relationships.RESOLVES_TO, entity_a=current_dnsRequests.get('Domain'), entity_a_type=FeedIndicatorType.Domain, entity_b=ip, entity_b_type=FeedIndicatorType.IP ) ) domain_ip_dbot_score = Common.DBotScore( indicator=ip, indicator_type=DBotScoreType.IP, integration_name="ANYRUN", score=Common.DBotScore.NONE ) domain_ip_indicator = Common.IP( ip=ip, dbot_score=domain_ip_dbot_score ) returned_data.append(CommandResults( indicator=domain_ip_indicator, readable_output=tableToMarkdown( f"{ip}", [{ "Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}" }] ) )) domain_indicator = Common.Domain( domain=current_dnsRequests.get('Domain'), dbot_score=current_dbot_score, relationships=relationships ) if current_dnsRequests.get('Domain') not in [ x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f"{current_dnsRequests.get('Domain')}", [{ "Description": f"This domain was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=domain_indicator, relationships=relationships )) # Then add all the network-related indicators - 'httpRequests' if 'httpRequests' in network_data: for current_httpRequests in network_data.get('httpRequests'): reputation = current_httpRequests['Reputation'] if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_httpRequests.get('URL'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_httpRequests.get('URL'), entity_b_type=FeedIndicatorType.URL, brand="ANYRUN" )] url_indicator = Common.URL( url=current_httpRequests.get('URL'), geo_country=current_httpRequests.get('Country'), port=current_httpRequests.get('Port'), dbot_score=current_dbot_score, relationships=relationships ) if current_httpRequests.get('URL') not in [ x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f"{current_httpRequests.get('URL')}", [{ "Description": f"This URL was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=url_indicator, relationships=relationships )) if 'mitre' in data: mitre_data = data.get('mitre') for item in mitre_data: relationships = [EntityRelationship( 
name=EntityRelationship.Relationships.RELATED_TO, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=item.get('name'), entity_b_type='Attack Pattern' )] attack_indicator = Common.AttackPattern( stix_id=None, value=item.get('name'), mitre_id=item.get('id') ) returned_data.append(CommandResults( readable_output=tableToMarkdown( f"{item.get('name')}", [{ "Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=attack_indicator, relationships=relationships )) return returned_data
def generate_dbotscore(response: Dict) -> List: """Creates CommandResult object based on the contents of 'response' argument and provides DBotScore objects. Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- List A list of CommandResults objects. """ data = response.get('data', {}) analysis = data.get('analysis', {}) main_object = analysis.get('content', {}).get('mainObject', {}) submission_type = main_object.get('type') submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold() reputation_map = { "shared": Common.DBotScore.NONE, "unknown": Common.DBotScore.NONE, "whitelisted": Common.DBotScore.GOOD, "malicious": Common.DBotScore.BAD, "suspicious": Common.DBotScore.SUSPICIOUS } returned_data = [] main_entity = None main_entity_type = None # Add the hash or URL first if submission_type == 'hash': hashes = main_object.get('hashes', {}) info = main_object.get('info', {}) file_type = info.get('file') exif = info.get('exif', {}) main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5') main_entity_type = FeedIndicatorType.File dbot_score = Common.DBotScore( indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'), indicator_type=DBotScoreType.FILE, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) returned_data.append(CommandResults( indicator=Common.File( dbot_score=dbot_score, md5=hashes.get('md5'), sha1=hashes.get('sha1'), sha256=hashes.get('sha256'), file_type=file_type, associated_file_names=exif.get('OriginalFileName') ) )) else: main_entity = main_object.get('url') main_entity_type = FeedIndicatorType.URL url_outputs = { 'Data': main_object.get('url') } dbot_score = Common.DBotScore( indicator=main_object.get('url'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) if dbot_score.score >= 2: url_outputs['Malicious'] = { 'Vendor': 'ANYRUN', 'Description': threat_text } returned_data.append(CommandResults( outputs_prefix='URL', outputs_key_field=['Data'], outputs=url_outputs, indicator=Common.URL( url=main_object.get('url'), dbot_score=dbot_score, ) )) # Check if network information is available in the report if 'network' in data: network_data = data.get('network') # Then add all the network-related indicators - 'connections' if 'connections' in network_data: connections = network_data.get('connections') for current_connection in connections: reputation = current_connection.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_connection.get('IP'), indicator_type=DBotScoreType.IP, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.RELATED_TO, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_connection.get('IP'), entity_b_type=FeedIndicatorType.IP, brand="ANYRUN" )] ip_indicator = Common.IP( ip=current_connection.get('IP'), asn=current_connection.get('ASN'), port=current_connection.get('Port'), geo_country=current_connection.get('Country'), dbot_score=current_dbot_score, relationships=relationships ) if current_connection.get('IP') not in [ x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP) ]: returned_data.append(CommandResults( 
readable_output=tableToMarkdown( f"{current_connection.get('IP')}", [{ "Description": f"This IP was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=ip_indicator, relationships=relationships )) # Then add all the network-related indicators - 'dnsRequests' if 'dnsRequests' in network_data: for current_dnsRequests in network_data.get('dnsRequests'): reputation = current_dnsRequests.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_dnsRequests.get('Domain'), indicator_type=DBotScoreType.DOMAIN, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_dnsRequests.get('Domain'), entity_b_type=FeedIndicatorType.Domain, brand="ANYRUN" )] if "IP" in current_dnsRequests: for ip in current_dnsRequests.get('IP', []): relationships.append( EntityRelationship( name=EntityRelationship.Relationships.RESOLVES_TO, entity_a=current_dnsRequests.get('Domain'), entity_a_type=FeedIndicatorType.Domain, entity_b=ip, entity_b_type=FeedIndicatorType.IP ) ) domain_ip_dbot_score = Common.DBotScore( indicator=ip, indicator_type=DBotScoreType.IP, integration_name="ANYRUN", score=Common.DBotScore.NONE ) domain_ip_indicator = Common.IP( ip=ip, dbot_score=domain_ip_dbot_score ) returned_data.append(CommandResults( indicator=domain_ip_indicator, readable_output=tableToMarkdown( f"{ip}", [{ "Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}" }] ) )) domain_indicator = Common.Domain( domain=current_dnsRequests.get('Domain'), dbot_score=current_dbot_score, relationships=relationships ) if current_dnsRequests.get('Domain') not in [ x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f"{current_dnsRequests.get('Domain')}", [{ "Description": f"This domain was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=domain_indicator, relationships=relationships )) # Then add all the network-related indicators - 'httpRequests' if 'httpRequests' in network_data: for current_httpRequests in network_data.get('httpRequests'): reputation = current_httpRequests['Reputation'] if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_httpRequests.get('URL'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_httpRequests.get('URL'), entity_b_type=FeedIndicatorType.URL, brand="ANYRUN" )] url_indicator = Common.URL( url=current_httpRequests.get('URL'), geo_country=current_httpRequests.get('Country'), port=current_httpRequests.get('Port'), dbot_score=current_dbot_score, relationships=relationships ) if current_httpRequests.get('URL') not in [ x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f"{current_httpRequests.get('URL')}", [{ "Description": f"This URL was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=url_indicator, relationships=relationships )) if 'mitre' in data: mitre_data = data.get('mitre') for item in mitre_data: relationships = [EntityRelationship( 
name=EntityRelationship.Relationships.RELATED_TO, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=item.get('name'), entity_b_type='Attack Pattern' )] attack_indicator = Common.AttackPattern( stix_id=None, value=item.get('name'), mitre_id=item.get('id') ) returned_data.append(CommandResults( readable_output=tableToMarkdown( f"{item.get('name')}", [{ "Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN" }] ), indicator=attack_indicator, relationships=relationships )) return returned_data
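The per-indicator branches above repeat one pattern: map an ANY.RUN reputation string to a DBot score and skip indicators that were already emitted. A simplified stand-alone sketch of that pattern using plain dicts rather than the real CommandResults/Common classes:

DBOT_NONE, DBOT_GOOD, DBOT_SUSPICIOUS, DBOT_BAD = 0, 1, 2, 3
reputation_map = {
    "shared": DBOT_NONE, "unknown": DBOT_NONE, "whitelisted": DBOT_GOOD,
    "malicious": DBOT_BAD, "suspicious": DBOT_SUSPICIOUS,
}

def collect_ips(connections):
    """Score and deduplicate connection IPs, mirroring the membership check
    used before each returned_data.append() above."""
    seen, results = set(), []
    for conn in connections:
        ip, reputation = conn.get("IP"), conn.get("Reputation")
        if reputation not in reputation_map or ip in seen:
            continue
        seen.add(ip)
        results.append({"ip": ip, "score": reputation_map[reputation]})
    return results

print(collect_ips([{"IP": "1.2.3.4", "Reputation": "malicious"},
                   {"IP": "1.2.3.4", "Reputation": "malicious"},
                   {"IP": "5.6.7.8", "Reputation": "shared"}]))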
7,163
def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
         tol=1e-4, prefilter=False):
    """Coarse to fine TV-L1 optical flow estimator.

    TV-L1 is a popular algorithm for optical flow estimation introduced
    by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.

    Parameters
    ----------
    I0 : ~numpy.ndarray
        The first gray scale image of the sequence.
    I1 : ~numpy.ndarray
        The second gray scale image of the sequence.
    dt : float
        Time step of the numerical scheme. Convergence is proved for
        values dt < 0.125, but it can be larger for faster convergence.
    lambda_ : float
        Attachement parameter. The smaller this parameter is,
        the smoother the solution is.
    tau : float
        Tightness parameter. It should have a small value in order to
        maintain attachment and regularization parts in correspondence.
    nwarp : int
        Number of times I1 is warped.
    niter : int
        Number of fixed point iterations.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : tuple[~numpy.ndarray]
        The estimated optical flow.

    References
    ----------
    .. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
       duality based approach for realtime TV-L 1 optical flow. In Joint
       pattern recognition symposium (pp. 214-223). Springer, Berlin,
       Heidelberg.
    .. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers, D.
       (2009). An improved algorithm for TV-L 1 optical flow. In
       Statistical and geometrical approaches to visual motion analysis
       (pp. 23-45). Springer, Berlin, Heidelberg.
    .. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo, G. (2013).
       TV-L1 optical flow estimation. Image Processing On Line, 2013,
       137-150.

    Examples
    --------
    >>> from skimage.color import rgb2gray
    >>> from skimage.data import stereo_motorcycle
    >>> from skimage.registration import tvl1
    >>> I0, I1, disp = stereo_motorcycle()
    >>> # --- Convert the images to gray level: color is not supported.
    >>> I0 = rgb2gray(I0)
    >>> I1 = rgb2gray(I1)
    >>> flow = tvl1(I1, I0)

    """
    solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau, nwarp=nwarp,
                     niter=niter, tol=tol, prefilter=prefilter)

    return coarse_to_fine(I0, I1, solver)
def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
         tol=1e-4, prefilter=False):
    """Coarse to fine TV-L1 optical flow estimator.

    TV-L1 is a popular algorithm for optical flow estimation introduced
    by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.

    Parameters
    ----------
    I0 : ~numpy.ndarray
        The first gray scale image of the sequence.
    I1 : ~numpy.ndarray
        The second gray scale image of the sequence.
    dt : float
        Time step of the numerical scheme. Convergence is proved for
        values dt < 0.125, but it can be larger for faster convergence.
    lambda_ : float
        Attachment parameter. The smaller this parameter is,
        the smoother the solution is.
    tau : float
        Tightness parameter. It should have a small value in order to
        maintain attachment and regularization parts in correspondence.
    nwarp : int
        Number of times I1 is warped.
    niter : int
        Number of fixed point iterations.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : tuple[~numpy.ndarray]
        The estimated optical flow.

    References
    ----------
    .. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
       duality based approach for realtime TV-L 1 optical flow. In Joint
       pattern recognition symposium (pp. 214-223). Springer, Berlin,
       Heidelberg.
    .. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers, D.
       (2009). An improved algorithm for TV-L 1 optical flow. In
       Statistical and geometrical approaches to visual motion analysis
       (pp. 23-45). Springer, Berlin, Heidelberg.
    .. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo, G. (2013).
       TV-L1 optical flow estimation. Image Processing On Line, 2013,
       137-150.

    Examples
    --------
    >>> from skimage.color import rgb2gray
    >>> from skimage.data import stereo_motorcycle
    >>> from skimage.registration import tvl1
    >>> I0, I1, disp = stereo_motorcycle()
    >>> # --- Convert the images to gray level: color is not supported.
    >>> I0 = rgb2gray(I0)
    >>> I1 = rgb2gray(I1)
    >>> flow = tvl1(I1, I0)

    """
    solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau, nwarp=nwarp,
                     niter=niter, tol=tol, prefilter=prefilter)

    return coarse_to_fine(I0, I1, solver)
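A hedged follow-on to the docstring example: the estimated flow is typically applied with skimage.transform.warp over a dense coordinate grid; the (v, u) component ordering of the returned flow is an assumption here.

import numpy as np
from skimage.color import rgb2gray
from skimage.data import stereo_motorcycle
from skimage.transform import warp

I0, I1, _ = stereo_motorcycle()
I0, I1 = rgb2gray(I0), rgb2gray(I1)
v, u = tvl1(I0, I1)  # assuming a (row, column) component ordering

nr, nc = I0.shape
rows, cols = np.meshgrid(np.arange(nr), np.arange(nc), indexing="ij")
# Pull the pixels of I1 back along the estimated flow to align it with I0.
I1_warped = warp(I1, np.array([rows + v, cols + u]), mode="edge")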
20,239
def run(*args): if not args: logger.error("error. Use --script-args [PATH] to specify the " + "location of the csv.") else: page_moves_file = args[0] redirects_file = 'redirects_list.csv' pages_to_move = set() with open(page_moves_file, "r") as csv_file: page_list = csv.reader(csv_file, delimiter=',') next(page_list) # skip the header row # Edit this list to match the headers of the input file, just # make sure page_id and redirect are included for [redirect, _, _, page_id, _, _, _, _, _] in page_list: # Get the set of pages that will need wagtail redirects if redirect == 'TRUE': page = Page.objects.get(id=page_id) live_descendants = \ page.get_descendants(True).filter(live=True) pages_to_move = pages_to_move.union(live_descendants) ids = [pg.id for pg in pages_to_move] logger.info(f"IDs of pages to move: {ids}") logger.info(f"Total pages: {len(ids)}") with open(redirects_file, "w") as output_file: redirects_list = csv.writer(output_file) for page in pages_to_move: row = [page.url, page.id] redirects_list.writerow(row)
def run(*args): if not args: logger.error("error. Use --script-args [PATH] to specify the " + "location of the csv.") else: page_moves_file = args[0] redirects_file = 'redirects_list.csv' pages_to_move = set() with open(page_moves_file, "r") as csv_file: page_list = csv.reader(csv_file, delimiter=',') next(page_list) # skip the header row # Edit this list to match the headers of the input file, just # make sure page_id and redirect are included for [redirect, _, _, page_id, _, _, _, _, _] in page_list: # Get the set of pages that will need wagtail redirects if redirect == 'TRUE': page = Page.objects.get(id=page_id) live_descendants = \ page.get_descendants(True).filter(live=True) pages_to_move = pages_to_move.union(live_descendants) ids = [pg.id for pg in pages_to_move] logger.info(f"IDs of pages to redirect: {ids}") logger.info(f"Total pages: {len(ids)}") with open(redirects_file, "w") as output_file: redirects_list = csv.writer(output_file) for page in pages_to_move: row = [page.url, page.id] redirects_list.writerow(row)
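A sketch of how the emitted redirects_list.csv might later be consumed to create Wagtail redirect records once the pages have moved; the import paths assume a recent Wagtail release, and the follow-up workflow itself is an assumption.

import csv

from wagtail.contrib.redirects.models import Redirect
from wagtail.models import Page

with open("redirects_list.csv") as csv_file:
    for old_url, page_id in csv.reader(csv_file):
        page = Page.objects.get(id=page_id)
        Redirect.objects.get_or_create(
            old_path=Redirect.normalise_path(old_url),
            defaults={"redirect_page": page},
        )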
55,399
def load_model(model_uri, dst_path=None): """ Load a LightGBM model from a local file or a run. :param model_uri: The location, in URI format, of the MLflow model. For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` For more information about supported URI schemes, see `Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html# artifact-locations>`_. :param dst_path: The local filesystem path to which to download the model artifact. This directory must already exist. If unspecified, a local output path will be created. :return: A LightGBM model (an instance of `lightgbm.Booster`_) or LightGBM scikit-learn models, depending on the saved model class specification. """ local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path) return _load_model(path=local_model_path)
def load_model(model_uri, dst_path=None): """ Load a LightGBM model from a local file or a run. :param model_uri: The location, in URI format, of the MLflow model. For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` For more information about supported URI schemes, see `Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html# artifact-locations>`_. :param dst_path: The local filesystem path to which to download the model artifact. This directory must already exist. If unspecified, a local output path will be created. :return: A LightGBM model (an instance of `lightgbm.Booster`_) or a LightGBM scikit-learn model, depending on the saved model class specification. """ local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path) return _load_model(path=local_model_path)
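A short usage sketch for the loader; the run ID and the feature frame are placeholders.

import mlflow.lightgbm
import pandas as pd

model = mlflow.lightgbm.load_model("runs:/<run_id>/model")   # placeholder URI
X = pd.DataFrame({"f0": [0.1, 0.2], "f1": [1.0, 2.0]})       # placeholder features
print(model.predict(X))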
33,716
def test_atomic_creation(ray_start_cluster): # Setup cluster. cluster = ray_start_cluster bundle_cpu_size = 2 bundle_per_node = 2 num_nodes = 2 [ cluster.add_node(num_cpus=bundle_cpu_size * bundle_per_node) for _ in range(num_nodes) ] ray.init(address=cluster.address) @ray.remote(num_cpus=1) class NormalActor: def ping(self): pass @ray.remote(num_cpus=3) def bothering_task(): import time time.sleep(1) return True # Schedule tasks to fail initial placement group creation. tasks = [bothering_task.remote() for _ in range(2)] # Create an actor that will fail bundle scheduling. # It is important to use pack strategy to make test less flaky. pg = ray.util.placement_group( name="name", strategy="SPREAD", bundles=[{ "CPU": bundle_cpu_size } for _ in range(num_nodes * bundle_per_node)]) # Create a placement group actor. # This shouldn't be scheduled because atomic # placement group creation should've failed pg_actor = NormalActor.options( placement_group=pg, placement_group_bundle_index=num_nodes * bundle_per_node - 1).remote() # Wait on the placement group now. It should be unready # because normal actor takes resources that are required # for one of bundle creation. ready, unready = ray.wait([pg.ready()], timeout=0) assert len(ready) == 0 assert len(unready) == 1 # Wait until all tasks are done. assert all(ray.get(tasks)) # Wait on the placement group creation. Since resources are now available, # it should be ready soon. ready, unready = ray.wait([pg.ready()]) assert len(ready) == 1 assert len(unready) == 0 # Confirm that the placement group actor is created. It will # raise an exception if actor was scheduled before placement # group was created thus it checks atomicity. ray.get(pg_actor.ping.remote(), timeout=3.0) ray.kill(pg_actor) # Make sure atomic creation failure didn't impact resources. @ray.remote(num_cpus=bundle_cpu_size) def resource_check(): return True # This should hang because every resources # are claimed by placement group. check_without_pg = [ resource_check.remote() for _ in range(bundle_per_node * num_nodes) ] # This all should scheduled on each bundle. check_with_pg = [ resource_check.options( placement_group=pg, placement_group_bundle_index=i).remote() for i in range(bundle_per_node * num_nodes) ] # Make sure these are hanging. ready, unready = ray.wait(check_without_pg, timeout=0) assert len(ready) == 0 assert len(unready) == bundle_per_node * num_nodes # Make sure these are all scheduled. assert all(ray.get(check_with_pg)) ray.util.remove_placement_group(pg) def pg_removed(): return ray.util.placement_group_table(pg)["state"] == "REMOVED" wait_for_condition(pg_removed) # Make sure check without pgs are all # scheduled properly because resources are cleaned up. assert all(ray.get(check_without_pg))
def test_atomic_creation(ray_start_cluster): # Setup cluster. cluster = ray_start_cluster bundle_cpu_size = 2 bundle_per_node = 2 num_nodes = 2 [ cluster.add_node(num_cpus=bundle_cpu_size * bundle_per_node) for _ in range(num_nodes) ] ray.init(address=cluster.address) @ray.remote(num_cpus=1) class NormalActor: def ping(self): pass @ray.remote(num_cpus=3) def bothering_task(): import time time.sleep(1) return True # Schedule tasks to fail initial placement group creation. tasks = [bothering_task.remote() for _ in range(2)] # Create an actor that will fail bundle scheduling. # It is important to use pack strategy to make test less flaky. pg = ray.util.placement_group( name="name", strategy="SPREAD", bundles=[{ "CPU": bundle_cpu_size } for _ in range(num_nodes * bundle_per_node)]) # Create a placement group actor. # This shouldn't be scheduled because atomic # placement group creation should've failed. pg_actor = NormalActor.options( placement_group=pg, placement_group_bundle_index=num_nodes * bundle_per_node - 1).remote() # Wait on the placement group now. It should be unready # because normal actor takes resources that are required # for one of bundle creation. ready, unready = ray.wait([pg.ready()], timeout=0) assert len(ready) == 0 assert len(unready) == 1 # Wait until all tasks are done. assert all(ray.get(tasks)) # Wait on the placement group creation. Since resources are now available, # it should be ready soon. ready, unready = ray.wait([pg.ready()]) assert len(ready) == 1 assert len(unready) == 0 # Confirm that the placement group actor is created. It will # raise an exception if actor was scheduled before placement # group was created thus it checks atomicity. ray.get(pg_actor.ping.remote(), timeout=3.0) ray.kill(pg_actor) # Make sure atomic creation failure didn't impact resources. @ray.remote(num_cpus=bundle_cpu_size) def resource_check(): return True # This should hang because every resources # are claimed by placement group. check_without_pg = [ resource_check.remote() for _ in range(bundle_per_node * num_nodes) ] # This all should scheduled on each bundle. check_with_pg = [ resource_check.options( placement_group=pg, placement_group_bundle_index=i).remote() for i in range(bundle_per_node * num_nodes) ] # Make sure these are hanging. ready, unready = ray.wait(check_without_pg, timeout=0) assert len(ready) == 0 assert len(unready) == bundle_per_node * num_nodes # Make sure these are all scheduled. assert all(ray.get(check_with_pg)) ray.util.remove_placement_group(pg) def pg_removed(): return ray.util.placement_group_table(pg)["state"] == "REMOVED" wait_for_condition(pg_removed) # Make sure check without pgs are all # scheduled properly because resources are cleaned up. assert all(ray.get(check_without_pg))
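The test relies on a wait_for_condition helper that is not shown here; a generic polling version along these lines (timeout values chosen arbitrarily) captures what the call needs to do.

import time

def wait_for_condition(condition, timeout=10, retry_interval=0.1):
    """Poll `condition` until it returns True or `timeout` seconds pass."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return True
        time.sleep(retry_interval)
    raise RuntimeError("Condition was not met before the timeout.")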
42,342
def get_role_list(collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get list of installed collection roles. Only roles that have an argument specification defined are returned. .. note:: Version added: 2.2 :param str collection: A fully qualified collection name used to filter the results. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. 
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_list_command(collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error
def get_role_list(collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get list of installed collection roles. Only roles that have an argument specification defined are returned. .. note:: Version added: 2.2 :param str collection: A fully qualified collection name used to filter the results. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. 
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception. Ff set to 'False', log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_list_command(collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error
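A minimal call sketch; the collection name and private data directory are placeholders, and the keyword arguments shown are a small subset of those documented above.

from ansible_runner.interface import get_role_list

response, error = get_role_list(
    collection="community.general",     # placeholder collection name
    private_data_dir="/tmp/runner",     # placeholder directory
    quiet=True,
)
for role_name in (response or {}):
    print(role_name)
if error:
    print("stderr:", error)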
38,966
def run_tests(classes, cases, repeats, json=False): if json: classes = [c for c in classes if hasattr(c, 'to_json')] lpad = max([len(t.package) for t in classes]) + 4 print(f'testing {", ".join([t.package for t in classes])}, {repeats} times each') results = [] csv_results = [] for test_class in classes: times = [] p = test_class.package for i in range(repeats): count, pass_count = 0, 0 start = datetime.now() test = test_class(True) for j in range(3): for case in cases: if json: passed, result = test.to_json(case) else: passed, result = test.validate(case) count += 1 pass_count += passed time = (datetime.now() - start).total_seconds() success = pass_count / count * 100 print(f'{p:>{lpad}} ({i+1:>{len(str(repeats))}}/{repeats}) time={time:0.3f}s, success={success:0.2f}%') times.append(time) print(f'{p:>{lpad}} best={min(times):0.3f}s, avg={mean(times):0.3f}s, stdev={stdev(times):0.3f}s') model_count = 3 * len(cases) avg = mean(times) / model_count * 1e6 sd = stdev(times) / model_count * 1e6 results.append(f'{p:>{lpad}} best={min(times) / model_count * 1e6:0.3f}μs/iter ' f'avg={avg:0.3f}μs/iter stdev={sd:0.3f}μs/iter version={test_class.version}') csv_results.append([p, test_class.version, avg]) print() return results, csv_results
def run_tests(classes, cases, repeats, json=False): if json: classes = [c for c in classes if hasattr(c, 'to_json')] lpad = max([len(t.package) for t in classes]) + 4 print(f'testing {", ".join(t.package for t in classes)}, {repeats} times each') results = [] csv_results = [] for test_class in classes: times = [] p = test_class.package for i in range(repeats): count, pass_count = 0, 0 start = datetime.now() test = test_class(True) for j in range(3): for case in cases: if json: passed, result = test.to_json(case) else: passed, result = test.validate(case) count += 1 pass_count += passed time = (datetime.now() - start).total_seconds() success = pass_count / count * 100 print(f'{p:>{lpad}} ({i+1:>{len(str(repeats))}}/{repeats}) time={time:0.3f}s, success={success:0.2f}%') times.append(time) print(f'{p:>{lpad}} best={min(times):0.3f}s, avg={mean(times):0.3f}s, stdev={stdev(times):0.3f}s') model_count = 3 * len(cases) avg = mean(times) / model_count * 1e6 sd = stdev(times) / model_count * 1e6 results.append(f'{p:>{lpad}} best={min(times) / model_count * 1e6:0.3f}μs/iter ' f'avg={avg:0.3f}μs/iter stdev={sd:0.3f}μs/iter version={test_class.version}') csv_results.append([p, test_class.version, avg]) print() return results, csv_results
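The harness only assumes a small interface on each test class: package and version class attributes, a one-argument constructor, and validate/to_json methods returning a (passed, result) pair. A hypothetical minimal implementation that would satisfy it:

import json

class DummyCase:
    package = 'dummy-validator'   # invented package name
    version = '0.0.1'

    def __init__(self, allow_extra):
        self.allow_extra = allow_extra

    def validate(self, case):
        # Return (passed, result), exactly the shape run_tests unpacks.
        passed = isinstance(case, dict) and 'id' in case
        return passed, case if passed else None

    def to_json(self, case):
        try:
            return True, json.dumps(case)
        except TypeError:
            return False, None

# results, csv_results = run_tests([DummyCase], cases=[{'id': 1}], repeats=2)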
4,962
def test_Issue_17908(): # Animation frame_format should allow any of the following # if any of these are not allowed, an exception will be raised for fmt in ['png', 'jpeg', 'tiff', 'raw', 'rgba', 'ppm', 'sgi', 'bmp', 'pbm', 'svg']: mpl.rcParams['animation.frame_format'] = fmt
def test_animation_frame_format(): # Animation frame_format should allow any of the following # if any of these are not allowed, an exception will be raised for fmt in ['png', 'jpeg', 'tiff', 'raw', 'rgba', 'ppm', 'sgi', 'bmp', 'pbm', 'svg']: mpl.rcParams['animation.frame_format'] = fmt
202
def send_mail(from_addr, to_addr, subject, body_text, headers=None): """ Send an e-mail. Args: from_addr (str): The address to use in the From: header. to_addr (str): The address to send the e-mail to. subject (basestring): The subject of the e-mail. body_text (basestring): The body of the e-mail to be sent. headers (dict or None): A mapping of header fields to values to be included in the e-mail, if not None. """ if not from_addr: from_addr = config.get('bodhi_email') if not from_addr: log.warning('Unable to send mail: bodhi_email not defined in the config') return if to_addr in config.get('exclude_mail'): return subject = subject.encode('utf-8') body_text = body_text.encode('utf-8') msg = [b'From: %s' % from_addr.encode('utf-8'), b'To: %s' % to_addr.encode('utf-8')] if headers: for key, value in headers.items(): msg.append(f'{key}: {value}'.encode('utf-8')) msg.append(f"X-Bodhi: {config.get('default_email_domain')}".encode('utf-8')) msg += [b'Subject: %s' % subject, b'', body_text] body = b'\r\n'.join(msg) log.info('Sending mail to %s: %s', to_addr, subject) _send_mail(from_addr, to_addr, body)
def send_mail(from_addr, to_addr, subject, body_text, headers=None): """ Send an e-mail. Args: from_addr (str): The address to use in the From: header. to_addr (str): The address to send the e-mail to. subject (basestring): The subject of the e-mail. body_text (basestring): The body of the e-mail to be sent. headers (dict or None): A mapping of header fields to values to be included in the e-mail, if not None. """ if not from_addr: from_addr = config.get('bodhi_email') if not from_addr: log.warning('Unable to send mail: bodhi_email not defined in the config') return if to_addr in config.get('exclude_mail'): return subject = subject.encode('utf-8') body_text = body_text.encode('utf-8') msg = [f'From: {from_addr}'.encode('utf-8'), f'To: {to_addr}'.encode('utf-8')] if headers: for key, value in headers.items(): msg.append(f'{key}: {value}'.encode('utf-8')) msg.append(f"X-Bodhi: {config.get('default_email_domain')}".encode('utf-8')) msg += [b'Subject: %s' % subject, b'', body_text] body = b'\r\n'.join(msg) log.info('Sending mail to %s: %s', to_addr, subject) _send_mail(from_addr, to_addr, body)
32,825
def extract_appveyor(env): url = "https://ci.appveyor.com/project/{0}/builds/{1}".format( env.get("APPVEYOR_REPO_NAME"), env.get("APPVEYOR_BUILD_ID") ) if env.get("APPVEYOR_REPO_PROVIDER") and env.get("APPVEYOR_REPO_PROVIDER") == "github": repository = "https://github.com/{0}.git".format(env.get("APPVEYOR_REPO_NAME")) commit = env.get("APPVEYOR_REPO_COMMIT") branch = env.get("APPVEYOR_PULL_REQUEST_HEAD_REPO_BRANCH") or env.get("APPVEYOR_REPO_BRANCH") tag = env.get("APPVEYOR_REPO_TAG_NAME") else: repository = commit = branch = tag = None return { PROVIDER_NAME: "appveyor", git.REPOSITORY_URL: repository, git.COMMIT_SHA: commit, WORKSPACE_PATH: env.get("APPVEYOR_BUILD_FOLDER"), PIPELINE_ID: env.get("APPVEYOR_BUILD_ID"), PIPELINE_NAME: env.get("APPVEYOR_REPO_NAME"), PIPELINE_NUMBER: env.get("APPVEYOR_BUILD_NUMBER"), PIPELINE_URL: url, JOB_URL: url, git.BRANCH: branch, git.TAG: tag, }
def extract_appveyor(env): url = "https://ci.appveyor.com/project/{0}/builds/{1}".format( env.get("APPVEYOR_REPO_NAME"), env.get("APPVEYOR_BUILD_ID") ) if env.get("APPVEYOR_REPO_PROVIDER") == "github": repository = "https://github.com/{0}.git".format(env.get("APPVEYOR_REPO_NAME")) commit = env.get("APPVEYOR_REPO_COMMIT") branch = env.get("APPVEYOR_PULL_REQUEST_HEAD_REPO_BRANCH") or env.get("APPVEYOR_REPO_BRANCH") tag = env.get("APPVEYOR_REPO_TAG_NAME") else: repository = commit = branch = tag = None return { PROVIDER_NAME: "appveyor", git.REPOSITORY_URL: repository, git.COMMIT_SHA: commit, WORKSPACE_PATH: env.get("APPVEYOR_BUILD_FOLDER"), PIPELINE_ID: env.get("APPVEYOR_BUILD_ID"), PIPELINE_NAME: env.get("APPVEYOR_REPO_NAME"), PIPELINE_NUMBER: env.get("APPVEYOR_BUILD_NUMBER"), PIPELINE_URL: url, JOB_URL: url, git.BRANCH: branch, git.TAG: tag, }
23,654
def get_sky_diffuse(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, model='isotropic', model_perez='allsitescomposite1990'): r""" Determine in-plane sky diffuse irradiance component using the specified sky diffuse irradiance model. Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- poa_sky_diffuse : numeric Sky diffuse irradiance in the plane of array. [W/m2] Raises ------ ValueError If model is one of 'haydavies', 'reindl', or 'perez' and dni_extra is None. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. """ model = model.lower() if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None): raise ValueError(f'dni_extra is required for model {model}') if model == 'isotropic': sky = isotropic(surface_tilt, dhi) elif model == 'klucher': sky = klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth) elif model == 'haydavies': sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth) elif model == 'reindl': sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra, solar_zenith, solar_azimuth) elif model == 'king': sky = king(surface_tilt, dhi, ghi, solar_zenith) elif model == 'perez': if airmass is None: airmass = atmosphere.get_relative_airmass(solar_zenith) sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, airmass, model=model_perez) else: raise ValueError(f'invalid model selection {model}') return sky
def get_sky_diffuse(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, model='isotropic', model_perez='allsitescomposite1990'): r""" Determine in-plane sky diffuse irradiance component using the specified sky diffuse irradiance model. Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if ``model='perez'``. See :py:func:`~pvlib.irradiance.perez`. Returns ------- poa_sky_diffuse : numeric Sky diffuse irradiance in the plane of array. [W/m2] Raises ------ ValueError If model is one of 'haydavies', 'reindl', or 'perez' and dni_extra is None. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. """ model = model.lower() if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None): raise ValueError(f'dni_extra is required for model {model}') if model == 'isotropic': sky = isotropic(surface_tilt, dhi) elif model == 'klucher': sky = klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth) elif model == 'haydavies': sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth) elif model == 'reindl': sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra, solar_zenith, solar_azimuth) elif model == 'king': sky = king(surface_tilt, dhi, ghi, solar_zenith) elif model == 'perez': if airmass is None: airmass = atmosphere.get_relative_airmass(solar_zenith) sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, airmass, model=model_perez) else: raise ValueError(f'invalid model selection {model}') return sky
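The pair above documents a sky-diffuse transposition helper whose docstring cross-references pvlib.irradiance, so it is presumably pvlib.irradiance.get_sky_diffuse. A minimal sketch of a scalar call follows, assuming pvlib is installed and the function is importable under that name; the angles and irradiance values are illustrative only.

# Hedged sketch: scalar call to the sky-diffuse transposition function above.
# Assumes pvlib is installed and exposes it as pvlib.irradiance.get_sky_diffuse;
# the numeric inputs are made up for illustration.
from pvlib import irradiance

poa_sky_diffuse = irradiance.get_sky_diffuse(
    surface_tilt=30, surface_azimuth=180,   # panel orientation [degree]
    solar_zenith=45, solar_azimuth=170,     # sun position [degree]
    dni=700, ghi=500, dhi=120,              # irradiance components [W/m2]
    model='isotropic',                      # isotropic model needs no dni_extra
)
print(poa_sky_diffuse)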
30,467
def main(): # Write configure here params = {k: v for k, v in demisto.params().items() if v is not None} client = Client(**params) command = demisto.command() demisto.info('Command being called is {}'.format(command)) # Switch case commands = { 'test-module': test_module, 'get-indicators': get_indicators_command } try: if demisto.command() == 'fetch-indicators': indicators = fetch_indicators_command(client, params.get('indicator_type')) # we submit the indicators in batches for b in batch(indicators, batch_size=2000): demisto.createIndicators(b) else: readable_output, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: err_msg = f'Error in Spamhaus feed [{e}]' return_error(err_msg)
def main(): # Write configure here params = {k: v for k, v in demisto.params().items() if v is not None} client = Client(**params) command = demisto.command() demisto.info('Command being called is {}'.format(command)) # Switch case commands = { 'test-module': test_module, 'get-indicators': get_indicators_command } try: if demisto.command() == 'fetch-indicators': indicators = fetch_indicators_command(client, params.get('indicator_type')) # we submit the indicators in batches for b in batch(indicators, batch_size=2000): demisto.createIndicators(b) else: readable_output, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: err_msg = f'Error in {FEED_NAME} feed [{e}]' return_error(err_msg)
38,940
def test_bytesize_to(): class Model(BaseModel): size: ByteSize m = Model(size='1GiB') assert pytest.approx(m.size.to('MiB')) == 1024 assert pytest.approx(m.size.to('MB')) == 1073.741824 assert pytest.approx(m.size.to('TiB')) == 0.0009765625
def test_bytesize_to(): class Model(BaseModel): size: ByteSize m = Model(size='1GiB') assert pytest.approx(m.size.to('MiB')) == 1024 assert pytest.approx(m.size.to('MB')) == 1073.741824 assert m.size.to('TiB') == pytest.approx(0.0009765625)
13,164
def test_project_remote_mirrors(project): mirror_url = "https://gitlab.com/root/mirror.git" mirror = project.remote_mirrors.create({"url": mirror_url}) assert mirror.url == mirror_url mirror.enabled = True mirror.save() mirror = project.remote_mirrors.list()[0] assert isinstance(mirror, gitlab.v4.objects.ProjectRemoteMirror) assert mirror.url == mirror_url assert mirror.enabled is True
def test_project_remote_mirrors(project): mirror_url = "https://gitlab.example.com/root/mirror.git" mirror = project.remote_mirrors.create({"url": mirror_url}) assert mirror.url == mirror_url mirror.enabled = True mirror.save() mirror = project.remote_mirrors.list()[0] assert isinstance(mirror, gitlab.v4.objects.ProjectRemoteMirror) assert mirror.url == mirror_url assert mirror.enabled is True
50,201
def _generate_file_level_targets( generated_target_cls: type[Target], generator: Target, paths: Sequence[str], template_address: Address, template: dict[str, Any], overrides: dict[str, dict[Address, dict[str, Any]]], # NB: Should only ever be set to `None` in tests. union_membership: UnionMembership | None, *, add_dependencies_on_all_siblings: bool, use_generated_address_syntax: bool = False, ) -> GeneratedTargets: """Generate one new target for each path, using the same fields as the generator target except for the `sources` field only referring to the path and using a new address. Set `add_dependencies_on_all_siblings` to True so that each file-level target depends on all other generated targets from the target generator. This is useful if both are true: a) file-level targets usually need their siblings to be present to work. Most target types (Python, Java, Shell, etc) meet this, except for `files` and `resources` which have no concept of "imports" b) dependency inference cannot infer dependencies on sibling files. Otherwise, set `add_dependencies_on_all_siblings` to `False` so that dependencies are finer-grained. `overrides` allows changing the fields for particular targets. It expects the full file path as the key. """ # paths have already been globbed, and new path will also be globbed # therefore, paths should be glob escaped paths = (glob.escape(path) for path in paths) def generate_address(base_address: Address, relativized_fp: str) -> Address: return ( base_address.create_generated(relativized_fp) if use_generated_address_syntax else base_address.create_file(relativized_fp) ) normalized_overrides = dict(overrides or {}) all_generated_items: list[tuple[Address, str, dict[str, Any]]] = [] for fp in paths: relativized_fp = fast_relpath(fp, template_address.spec_path) generated_overrides = normalized_overrides.pop(fp, None) if generated_overrides is None: # No overrides apply. all_generated_items.append( (generate_address(template_address, relativized_fp), fp, dict(template)) ) else: # At least one override applies. Generate a target per set of fields. all_generated_items.extend( ( generate_address(overridden_address, relativized_fp), fp, {**template, **override_fields}, ) for overridden_address, override_fields in generated_overrides.items() ) # TODO: Parametrization in overrides will result in some unusual internal dependencies when # `add_dependencies_on_all_siblings`. Similar to inference, `add_dependencies_on_all_siblings` # should probably be field value aware. all_generated_address_specs = ( FrozenOrderedSet(addr.spec for addr, _, _ in all_generated_items) if add_dependencies_on_all_siblings else FrozenOrderedSet() ) def gen_tgt(address: Address, full_fp: str, generated_target_fields: dict[str, Any]) -> Target: if add_dependencies_on_all_siblings: if union_membership and not generated_target_cls.class_has_field( Dependencies, union_membership ): raise AssertionError( f"The {type(generator).__name__} target class generates " f"{generated_target_cls.__name__} targets, which do not " f"have a `{Dependencies.alias}` field, and thus cannot " "`add_dependencies_on_all_siblings`." 
) original_deps = generated_target_fields.get(Dependencies.alias, ()) generated_target_fields[Dependencies.alias] = tuple(original_deps) + tuple( all_generated_address_specs - {address.spec} ) generated_target_fields[SingleSourceField.alias] = fast_relpath(full_fp, address.spec_path) return generated_target_cls( generated_target_fields, address, union_membership, residence_dir=os.path.dirname(full_fp), ) result = tuple( gen_tgt(address, full_fp, fields) for address, full_fp, fields in all_generated_items ) if normalized_overrides: unused_relative_paths = sorted( fast_relpath(fp, template_address.spec_path) for fp in normalized_overrides ) all_valid_relative_paths = sorted( cast(str, tgt.address._relative_file_path or tgt.address.generated_name) for tgt in result ) raise InvalidFieldException( f"Unused file paths in the `overrides` field for {template_address}: " f"{sorted(unused_relative_paths)}" f"\n\nDid you mean one of these valid paths?\n\n" f"{all_valid_relative_paths}" ) return GeneratedTargets(generator, result)
def _generate_file_level_targets( generated_target_cls: type[Target], generator: Target, paths: Sequence[str], template_address: Address, template: dict[str, Any], overrides: dict[str, dict[Address, dict[str, Any]]], # NB: Should only ever be set to `None` in tests. union_membership: UnionMembership | None, *, add_dependencies_on_all_siblings: bool, use_generated_address_syntax: bool = False, ) -> GeneratedTargets: """Generate one new target for each path, using the same fields as the generator target except for the `sources` field only referring to the path and using a new address. Set `add_dependencies_on_all_siblings` to True so that each file-level target depends on all other generated targets from the target generator. This is useful if both are true: a) file-level targets usually need their siblings to be present to work. Most target types (Python, Java, Shell, etc) meet this, except for `files` and `resources` which have no concept of "imports" b) dependency inference cannot infer dependencies on sibling files. Otherwise, set `add_dependencies_on_all_siblings` to `False` so that dependencies are finer-grained. `overrides` allows changing the fields for particular targets. It expects the full file path as the key. """ # Paths have already been globbed, and new path will also be globbed # therefore, paths should be glob escaped. paths = (glob.escape(path) for path in paths) def generate_address(base_address: Address, relativized_fp: str) -> Address: return ( base_address.create_generated(relativized_fp) if use_generated_address_syntax else base_address.create_file(relativized_fp) ) normalized_overrides = dict(overrides or {}) all_generated_items: list[tuple[Address, str, dict[str, Any]]] = [] for fp in paths: relativized_fp = fast_relpath(fp, template_address.spec_path) generated_overrides = normalized_overrides.pop(fp, None) if generated_overrides is None: # No overrides apply. all_generated_items.append( (generate_address(template_address, relativized_fp), fp, dict(template)) ) else: # At least one override applies. Generate a target per set of fields. all_generated_items.extend( ( generate_address(overridden_address, relativized_fp), fp, {**template, **override_fields}, ) for overridden_address, override_fields in generated_overrides.items() ) # TODO: Parametrization in overrides will result in some unusual internal dependencies when # `add_dependencies_on_all_siblings`. Similar to inference, `add_dependencies_on_all_siblings` # should probably be field value aware. all_generated_address_specs = ( FrozenOrderedSet(addr.spec for addr, _, _ in all_generated_items) if add_dependencies_on_all_siblings else FrozenOrderedSet() ) def gen_tgt(address: Address, full_fp: str, generated_target_fields: dict[str, Any]) -> Target: if add_dependencies_on_all_siblings: if union_membership and not generated_target_cls.class_has_field( Dependencies, union_membership ): raise AssertionError( f"The {type(generator).__name__} target class generates " f"{generated_target_cls.__name__} targets, which do not " f"have a `{Dependencies.alias}` field, and thus cannot " "`add_dependencies_on_all_siblings`." 
) original_deps = generated_target_fields.get(Dependencies.alias, ()) generated_target_fields[Dependencies.alias] = tuple(original_deps) + tuple( all_generated_address_specs - {address.spec} ) generated_target_fields[SingleSourceField.alias] = fast_relpath(full_fp, address.spec_path) return generated_target_cls( generated_target_fields, address, union_membership, residence_dir=os.path.dirname(full_fp), ) result = tuple( gen_tgt(address, full_fp, fields) for address, full_fp, fields in all_generated_items ) if normalized_overrides: unused_relative_paths = sorted( fast_relpath(fp, template_address.spec_path) for fp in normalized_overrides ) all_valid_relative_paths = sorted( cast(str, tgt.address._relative_file_path or tgt.address.generated_name) for tgt in result ) raise InvalidFieldException( f"Unused file paths in the `overrides` field for {template_address}: " f"{sorted(unused_relative_paths)}" f"\n\nDid you mean one of these valid paths?\n\n" f"{all_valid_relative_paths}" ) return GeneratedTargets(generator, result)
24,482
def assert_all_metrics(aggregator, minimun_tags=None, hostname=None): for metric, metric_type in METRICS: aggregator.assert_metric(metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname) minimun_tags = minimun_tags or [] for tag in minimun_tags: aggregator.assert_metric_has_tag(metric, tag) for metric, metric_type in OPTIONAL_METRICS: aggregator.assert_metric( metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname, at_least=0 ) aggregator.assert_all_metrics_covered()
def assert_all_metrics(aggregator, minimun_tags=None, hostname=None): for metric, metric_type in METRICS: aggregator.assert_metric(metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname) minimum_tags = minimum_tags or [] for tag in minimun_tags: aggregator.assert_metric_has_tag(metric, tag) for metric, metric_type in OPTIONAL_METRICS: aggregator.assert_metric( metric, metric_type=getattr(aggregator, metric_type.upper()), hostname=hostname, at_least=0 ) aggregator.assert_all_metrics_covered()
58,708
def drop_intents_below_freq( training_data: TrainingData, cutoff: int = 5 ) -> TrainingData: """Remove intent groups with less than cutoff instances. Args: training_data: training data cutoff: threshold Returns: updated training data """ logger.debug( "Raw data intent examples: {}".format(len(training_data.intent_examples)) ) examples_per_intent = training_data.number_of_examples_per_intent.items() intents_below_cutoff = {i for i, c in examples_per_intent if c < cutoff} return training_data.filter_training_examples( lambda ex: ex.get(INTENT) not in intents_below_cutoff )
def drop_intents_below_freq( training_data: TrainingData, cutoff: int = 5 ) -> TrainingData: """Remove intent groups with less than cutoff instances. Args: training_data: training data cutoff: threshold Returns: updated training data """ logger.debug( "Raw data intent examples: {}".format(len(training_data.intent_examples)) ) examples_per_intent = training_data.number_of_examples_per_intent.items() return training_data.filter_training_examples( lambda ex: examples_per_intent[ex.get(INTENT)] >= cutoff )
45,106
def with_toloka_client(func: Callable) -> Callable: """ Decorator that allows function to pass `secret_name` and `env` args and operate with `toloka_client` instance. Args: - func (Callable): function, that operate with `toloka_client` argument. Returns: - Callable: the wrapper, that takes optional `secret_name` and `env` arguments and operates with default `toloka_token` if they are not passed. Example: >>> @with_toloka_client ... def some_func(toloka_client: TolokaClient): ... toloka_client.create_project(...) ... >>> some_func() # Use default toloka_client created using TOLOKA_TOKEN secret. >>> some_func(secret_name='OTHER_ACCOUNT_SECRET') # Allow to pass other secret. ... """ def _wrapper( *args, secret_name: str = DEFAULT_TOLOKA_SECRET_NAME, env: str = DEFAULT_TOLOKA_ENV, **kwargs, ) -> Any: token = Secret(secret_name).get() toloka_client = TolokaClient(token, env) return partial(add_headers("prefect")(func), toloka_client=toloka_client)( *args, **kwargs ) return with_updated_signature( func, _wrapper, remove_func_args=("toloka_client",), add_wrapper_args=("secret_name", "env"), )
def with_toloka_client(func: Callable) -> Callable: """ Decorator that allows function to pass `secret_name` and `env` args and operate with `toloka_client` instance. Args: - func (Callable): Function that operate with `toloka_client` argument. Returns: - Callable: the wrapper, that takes optional `secret_name` and `env` arguments and operates with default `toloka_token` if they are not passed. Example: >>> @with_toloka_client ... def some_func(toloka_client: TolokaClient): ... toloka_client.create_project(...) ... >>> some_func() # Use default toloka_client created using TOLOKA_TOKEN secret. >>> some_func(secret_name='OTHER_ACCOUNT_SECRET') # Allow to pass other secret. ... """ def _wrapper( *args, secret_name: str = DEFAULT_TOLOKA_SECRET_NAME, env: str = DEFAULT_TOLOKA_ENV, **kwargs, ) -> Any: token = Secret(secret_name).get() toloka_client = TolokaClient(token, env) return partial(add_headers("prefect")(func), toloka_client=toloka_client)( *args, **kwargs ) return with_updated_signature( func, _wrapper, remove_func_args=("toloka_client",), add_wrapper_args=("secret_name", "env"), )
7,403
def _match_cumulative_cdf(source, template): """ Return modified source array so that the cumulative density function of its values matches the cumulative density function of the template. """ if source.dtype.kind == 'u': src_lookup = source.ravel() src_counts = np.bincount(src_lookup) tmpl_counts = np.bincount(template.ravel()) tmpl_values = np.arange(len(tmpl_counts)) tmpl_values_exist = tmpl_counts > 0 tmpl_counts = tmpl_counts[tmpl_values_exist] tmpl_values = tmpl_values[tmpl_values_exist] else: src_values, src_lookup, src_counts = np.unique(source.ravel(), return_inverse=True, return_counts=True) tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True) # calculate normalized quantiles for each array src_quantiles = np.cumsum(src_counts) / source.size tmpl_quantiles = np.cumsum(tmpl_counts) / template.size interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values) return interp_a_values[src_lookup].reshape(source.shape)
def _match_cumulative_cdf(source, template): """ Return modified source array so that the cumulative density function of its values matches the cumulative density function of the template. """ if source.dtype.kind == 'u': src_lookup = source.ravel() src_counts = np.bincount(src_lookup) tmpl_counts = np.bincount(template.ravel()) tmpl_values = np.arange(len(tmpl_counts)) tmpl_values_exist = tmpl_counts > 0 tmpl_counts = tmpl_counts[tmpl_values_exist] tmpl_values = tmpl_values[tmpl_values_exist] else: src_values, src_lookup, src_counts = np.unique(source.reshape(-1), return_inverse=True, return_counts=True) tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True) # calculate normalized quantiles for each array src_quantiles = np.cumsum(src_counts) / source.size tmpl_quantiles = np.cumsum(tmpl_counts) / template.size interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values) return interp_a_values[src_lookup].reshape(source.shape)
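The helper in this pair matches one array's empirical CDF to another's, which is the core of histogram matching; in scikit-image the equivalent functionality is exposed publicly as skimage.exposure.match_histograms. A small self-contained sketch of that public entry point follows, assuming numpy and scikit-image are installed; the toy arrays are arbitrary.

# Hedged sketch of histogram matching through a public API that wraps a
# helper of this shape. Assumes scikit-image and numpy are installed; the
# input images are random toy data.
import numpy as np
from skimage.exposure import match_histograms

rng = np.random.default_rng(0)
source = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
reference = rng.integers(50, 200, size=(64, 64), dtype=np.uint8)

matched = match_histograms(source, reference)  # float array, same shape as source
print(matched.shape, matched.min(), matched.max())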
33,917
def _configure_subnet(config): ec2 = _resource("ec2", config) use_internal_ips = config["provider"].get("use_internal_ips", False) # If head or worker security group is specified, filter down to subnets # belonging to the same VPC as the security group. sg_ids = [] for node_type in config["available_node_types"].values(): node_config = node_type["node_config"] sg_ids.extend(node_config.get("SecurityGroupIds", [])) if sg_ids: vpc_id_of_sg = _get_vpc_id_of_sg(sg_ids, config) else: vpc_id_of_sg = None try: candidate_subnets = ec2.subnets.all() if vpc_id_of_sg: candidate_subnets = [ s for s in candidate_subnets if s.vpc_id == vpc_id_of_sg ] subnets = sorted( ( s for s in candidate_subnets if s.state == "available" and (use_internal_ips or s.map_public_ip_on_launch) ), reverse=True, # sort from Z-A key=lambda subnet: subnet.availability_zone, ) except botocore.exceptions.ClientError as exc: handle_boto_error(exc, "Failed to fetch available subnets from AWS.") raise exc if not subnets: cli_logger.abort( "No usable subnets found, try manually creating an instance in " "your specified region to populate the list of subnets " "and trying this again.\n" "Note that the subnet must map public IPs " "on instance launch unless you set `use_internal_ips: true` in " "the `provider` config." ) if "availability_zone" in config["provider"]: azs = config["provider"]["availability_zone"].split(",") subnets = [ s for az in azs # Iterate over AZs first to maintain the ordering for s in subnets if s.availability_zone == az ] if not subnets: cli_logger.abort( "No usable subnets matching availability zone {} found.\n" "Choose a different availability zone or try " "manually creating an instance in your specified region " "to populate the list of subnets and trying this again.", config["provider"]["availability_zone"], ) # Use subnets in only one VPC, so that _configure_security_groups only # needs to create a security group in this one VPC. Otherwise, we'd need # to set up security groups in all of the user's VPCs and set up networking # rules to allow traffic between these groups. # See https://github.com/ray-project/ray/pull/14868. subnet_ids = [s.subnet_id for s in subnets if s.vpc_id == subnets[0].vpc_id] # map from node type key -> source of SubnetIds field subnet_src_info = {} _set_config_info(subnet_src=subnet_src_info) for key, node_type in config["available_node_types"].items(): node_config = node_type["node_config"] if "SubnetIds" not in node_config: subnet_src_info[key] = "default" node_config["SubnetIds"] = subnet_ids else: node_type_subnets = node_config["SubnetIds"] if len(set(subnet_ids).intersection(node_type_subnets)) == 0: cli_logger.abort( "MISMATCH between available subnets & specified subnets!\n" "The available subnets (as determined by Availability Zone " "state, ability to launch public IPs & Availability Zones " f"specified in the `provider` section) are: {subnets}.\n" "Node type ({node_type}) specifies subnets: {node_type_subnets}" ) subnet_src_info[key] = "config" return config
def _configure_subnet(config): ec2 = _resource("ec2", config) use_internal_ips = config["provider"].get("use_internal_ips", False) # If head or worker security group is specified, filter down to subnets # belonging to the same VPC as the security group. sg_ids = [] for node_type in config["available_node_types"].values(): node_config = node_type["node_config"] sg_ids.extend(node_config.get("SecurityGroupIds", [])) if sg_ids: vpc_id_of_sg = _get_vpc_id_of_sg(sg_ids, config) else: vpc_id_of_sg = None try: candidate_subnets = ec2.subnets.all() if vpc_id_of_sg: candidate_subnets = [ s for s in candidate_subnets if s.vpc_id == vpc_id_of_sg ] subnets = sorted( ( s for s in candidate_subnets if s.state == "available" and (use_internal_ips or s.map_public_ip_on_launch) ), reverse=True, # sort from Z-A key=lambda subnet: subnet.availability_zone, ) except botocore.exceptions.ClientError as exc: handle_boto_error(exc, "Failed to fetch available subnets from AWS.") raise exc if not subnets: cli_logger.abort( "No usable subnets found, try manually creating an instance in " "your specified region to populate the list of subnets " "and trying this again.\n" "Note that the subnet must map public IPs " "on instance launch unless you set `use_internal_ips: true` in " "the `provider` config." ) if "availability_zone" in config["provider"]: azs = config["provider"]["availability_zone"].split(",") subnets = [ s for az in azs # Iterate over AZs first to maintain the ordering for s in subnets if s.availability_zone == az ] if not subnets: cli_logger.abort( "No usable subnets matching availability zone {} found.\n" "Choose a different availability zone or try " "manually creating an instance in your specified region " "to populate the list of subnets and trying this again.", config["provider"]["availability_zone"], ) # Use subnets in only one VPC, so that _configure_security_groups only # needs to create a security group in this one VPC. Otherwise, we'd need # to set up security groups in all of the user's VPCs and set up networking # rules to allow traffic between these groups. # See https://github.com/ray-project/ray/pull/14868. subnet_ids = [s.subnet_id for s in subnets if s.vpc_id == subnets[0].vpc_id] # map from node type key -> source of SubnetIds field subnet_src_info = {} _set_config_info(subnet_src=subnet_src_info) for key, node_type in config["available_node_types"].items(): node_config = node_type["node_config"] if "SubnetIds" not in node_config: subnet_src_info[key] = "default" node_config["SubnetIds"] = subnet_ids else: node_type_subnets = node_config["SubnetIds"] if len(set(subnet_ids).intersection(node_type_subnets)) == 0: cli_logger.abort( "MISMATCH between available subnets & specified subnets!\n" "The available subnets (as determined by Availability Zone " "state, ability to launch public IPs & Availability Zones " f"specified in the `provider` section) are: {subnets}.\n" f"Node type ({node_type}) specifies subnets: {node_type_subnets}" ) subnet_src_info[key] = "config" return config
27,161
def test_toolip_derived_from_function_docstring(): """Test that the tooltip for TaskGroup is the decorated-function's docsring.""" @dag(start_date=pendulum.datetime(2022, 1, 1)) def pipeline(): @task_group() def tg(): """Function docstring.""" tg() _ = pipeline() assert _.task_group_dict["tg"].tooltip == "Function docstring."
def test_toolip_derived_from_function_docstring(): """Test that the tooltip for TaskGroup is the decorated-function's docstring.""" @dag(start_date=pendulum.datetime(2022, 1, 1)) def pipeline(): @task_group() def tg(): """Function docstring.""" tg() _ = pipeline() assert _.task_group_dict["tg"].tooltip == "Function docstring."
38,538
def triangulations( p_1: np.ndarray, p_2: np.ndarray, t_1: np.ndarray, t_2: np.ndarray ) -> List[Tuple[int, int, float]]: """Compute intersection of two triangle tessalation of a surface. The function will identify partly overlapping triangles between t_1 and t_2, and compute their common area. If parts of domain 1 or 2 is covered by one tessalation only, this will simply be ignored by the function. Implementation note: The function relies on the intersection algorithm in shapely.geometry.Polygon. It may be possible to extend the functionality to other cell shapes. This would require more general data structures, but should not be too much of an effort. Parameters: p_1 (np.array, 2 x n_p1): Points in first tessalation. p_2 (np.array, 2 x n_p2): Points in second tessalation. t_1 (np.array, 3 x n_tri_1): Triangles in first tessalation, referring to indices in p_1. t_2 (np.array, 3 x n_tri_1): Triangles in second tessalation, referring to indices in p_2. Returns: list of tuples: Each representing an overlap. The tuple contains index of the overlapping triangles in the first and second tessalation, and their common area. See also: surface_tessalations() """ import shapely.geometry as shapely_geometry import shapely.speedups as shapely_speedups try: shapely_speedups.enable() except AttributeError: pass n_1 = t_1.shape[1] n_2 = t_2.shape[1] t_1 = t_1.T t_2 = t_2.T # Find x and y coordinates of the triangles of first tessalation x_1 = p_1[0, t_1] y_1 = p_1[1, t_1] # Same with second tessalation x_2 = p_2[0, t_2] y_2 = p_2[1, t_2] intersections: List[Tuple[int, int, float]] = [] # Bounding box of each triangle for first and second tessalation min_x_1 = np.min(x_1, axis=1) max_x_1 = np.max(x_1, axis=1) min_y_1 = np.min(y_1, axis=1) max_y_1 = np.max(y_1, axis=1) min_x_2 = np.min(x_2, axis=1) max_x_2 = np.max(x_2, axis=1) min_y_2 = np.min(y_2, axis=1) max_y_2 = np.max(y_2, axis=1) # Represent the second tessalation using a Polygon from the shapely package poly_2 = [ shapely_geometry.Polygon( [(x_2[j, 0], y_2[j, 0]), (x_2[j, 1], y_2[j, 1]), (x_2[j, 2], y_2[j, 2])] ) for j in range(n_2) ] # Loop over all triangles in first tessalation, look for overlapping # members in second tessalation for i in range(n_1): # Polygon representation of the first triangle. poly_1 = shapely_geometry.Polygon( [(x_1[i, 0], y_1[i, 0]), (x_1[i, 1], y_1[i, 1]), (x_1[i, 2], y_1[i, 2])] ) # Find triangles in the second tessalation that are outside the # bounding box of this triangle. right = np.squeeze(np.where(min_x_2 > max_x_1[i])) left = np.squeeze(np.where(max_x_2 < min_x_1[i])) above = np.squeeze(np.where(min_y_2 > max_y_1[i])) below = np.squeeze(np.where(max_y_2 < min_y_1[i])) # Candidates for intersection are only elements not outside outside = np.unique(np.hstack((right, left, above, below))) candidates = np.setdiff1d(np.arange(n_2), outside, assume_unique=True) # Loop over remaining candidates, call upon shapely to find # intersection for j in candidates: isect = poly_1.intersection(poly_2[j]) if isinstance(isect, shapely_geometry.Polygon): intersections.append((i, j, isect.area)) return intersections
def triangulations( p_1: np.ndarray, p_2: np.ndarray, t_1: np.ndarray, t_2: np.ndarray ) -> List[Tuple[int, int, float]]: """Compute intersection of two triangle tesselations of a surface. The function will identify partly overlapping triangles between t_1 and t_2, and compute their common area. If parts of domain 1 or 2 is covered by one tessalation only, this will simply be ignored by the function. Implementation note: The function relies on the intersection algorithm in shapely.geometry.Polygon. It may be possible to extend the functionality to other cell shapes. This would require more general data structures, but should not be too much of an effort. Parameters: p_1 (np.array, 2 x n_p1): Points in first tessalation. p_2 (np.array, 2 x n_p2): Points in second tessalation. t_1 (np.array, 3 x n_tri_1): Triangles in first tessalation, referring to indices in p_1. t_2 (np.array, 3 x n_tri_1): Triangles in second tessalation, referring to indices in p_2. Returns: list of tuples: Each representing an overlap. The tuple contains index of the overlapping triangles in the first and second tessalation, and their common area. See also: surface_tessalations() """ import shapely.geometry as shapely_geometry import shapely.speedups as shapely_speedups try: shapely_speedups.enable() except AttributeError: pass n_1 = t_1.shape[1] n_2 = t_2.shape[1] t_1 = t_1.T t_2 = t_2.T # Find x and y coordinates of the triangles of first tessalation x_1 = p_1[0, t_1] y_1 = p_1[1, t_1] # Same with second tessalation x_2 = p_2[0, t_2] y_2 = p_2[1, t_2] intersections: List[Tuple[int, int, float]] = [] # Bounding box of each triangle for first and second tessalation min_x_1 = np.min(x_1, axis=1) max_x_1 = np.max(x_1, axis=1) min_y_1 = np.min(y_1, axis=1) max_y_1 = np.max(y_1, axis=1) min_x_2 = np.min(x_2, axis=1) max_x_2 = np.max(x_2, axis=1) min_y_2 = np.min(y_2, axis=1) max_y_2 = np.max(y_2, axis=1) # Represent the second tessalation using a Polygon from the shapely package poly_2 = [ shapely_geometry.Polygon( [(x_2[j, 0], y_2[j, 0]), (x_2[j, 1], y_2[j, 1]), (x_2[j, 2], y_2[j, 2])] ) for j in range(n_2) ] # Loop over all triangles in first tessalation, look for overlapping # members in second tessalation for i in range(n_1): # Polygon representation of the first triangle. poly_1 = shapely_geometry.Polygon( [(x_1[i, 0], y_1[i, 0]), (x_1[i, 1], y_1[i, 1]), (x_1[i, 2], y_1[i, 2])] ) # Find triangles in the second tessalation that are outside the # bounding box of this triangle. right = np.squeeze(np.where(min_x_2 > max_x_1[i])) left = np.squeeze(np.where(max_x_2 < min_x_1[i])) above = np.squeeze(np.where(min_y_2 > max_y_1[i])) below = np.squeeze(np.where(max_y_2 < min_y_1[i])) # Candidates for intersection are only elements not outside outside = np.unique(np.hstack((right, left, above, below))) candidates = np.setdiff1d(np.arange(n_2), outside, assume_unique=True) # Loop over remaining candidates, call upon shapely to find # intersection for j in candidates: isect = poly_1.intersection(poly_2[j]) if isinstance(isect, shapely_geometry.Polygon): intersections.append((i, j, isect.area)) return intersections
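Since the function above leans entirely on shapely's Polygon.intersection for the per-triangle work, a tiny self-contained sketch of that core step may help; it assumes shapely is installed and uses two arbitrary overlapping triangles.

# Hedged sketch of the per-pair intersection step used by triangulations():
# build two triangles as shapely Polygons and measure their common area.
# Assumes shapely is installed; coordinates are arbitrary toy values.
from shapely.geometry import Polygon

tri_a = Polygon([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)])
tri_b = Polygon([(0.5, 0.0), (1.5, 0.0), (0.5, 1.0)])

isect = tri_a.intersection(tri_b)
if isinstance(isect, Polygon):     # mirrors the type check in the function above
    print(isect.area)              # prints 0.125 for these two triangles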
1,655
def test_fit_transform(): alpha = 1 rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0) spca_lars.fit(Y) # variance computed by default shape (n_components, ) assert (spca_lars.explained_variance_.shape == (3,)) # Test that CD gives similar results spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0, alpha=alpha) spca_lasso.fit(Y) # variance computed by default shape (n_components, ) assert (spca_lasso.explained_variance_.shape == (3, )) assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
def test_fit_transform(): alpha = 1 rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0) spca_lars.fit(Y) # variance computed by default shape (n_components, ) assert (spca_lars.explained_variance_.shape == (3,)) # Test that CD gives similar results spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0, alpha=alpha) spca_lasso.fit(Y) # variance computed by default shape (n_components, ) assert spca_lasso.explained_variance_.shape == (3,) assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
35,445
def perform_ota_update(manifest_path: str): # reboot into recovery with open(manifest_path) as f: m = json.loads(f.read()) ota_fn = os.path.join(NEOSUPDATE_DIR, os.path.basename(m['ota_url'])) with open(RECOVERY_COMMAND, "wb") as rf: rf.write(bytes(f"--update_package={ota_fn}\n", encoding='utf-8')) os.system("service call power 16 i32 0 s16 recovery i32 1")
def perform_ota_update(manifest_path: str): # reboot into recovery with open(manifest_path) as f: m = json.loads(f.read()) ota_fn = os.path.join(NEOSUPDATE_DIR, os.path.basename(m['ota_url'])) with open(RECOVERY_COMMAND, "wb") as rf: rf.write(f"--update_package={ota_fn}\n".encode(), e) os.system("service call power 16 i32 0 s16 recovery i32 1")
22,770
def _report_next_steps(config: interfaces.IConfig, installer_err: Optional[errors.Error], lineage: Optional[storage.RenewableCert], new_or_renewed_cert: bool = True) -> None: """Displays post-run/certonly advice to the user about renewal and installation. The output varies by runtime configuration and any errors encountered during installation. :param config: Configuration object :type config: interfaces.IConfig :param installer_err: The installer/enhancement error encountered, if any. :type error: Optional[errors.Error] :param lineage: The resulting certificate lineage from the issuance, if any. :type lineage: Optional[storage.RenewableCert] :param bool new_or_renewed_cert: Whether the verb execution resulted in a certificate being saved (created or renewed). """ steps: List[str] = [] # If the installation or enhancement raised an error, show advice on trying again if installer_err: steps.append( "The certificate was saved, but could not be installed (installer: " f"{config.installer}). After fixing the error shown below, try installing it again " f"by running:\n {cli.cli_command} install --cert-name " f"{_cert_name_from_config_or_lineage(config, lineage)}" ) # If a certificate was obtained or renewed, show applicable renewal advice if new_or_renewed_cert: if config.csr: steps.append( "Certificates created using --csr will not be renewed automatically by Certbot. " "You will need to renew the certificate before it expires, by running the same " "Certbot command again.") elif _is_interactive_only_auth(config): steps.append( "This certificate will not be renewed automatically by Certbot. The --manual " "plugin requires the use of an authentication hook script (--manual-auth-hook) " "in order to support autorenewal. To renew this certificate, repeat this same " f"{cli.cli_command} command, before the certificate's expiry date." ) elif not config.preconfigured_renewal: steps.append( "The certificate will need to be renewed before it expires. Certbot can " "automatically renew the certificate in the background, but you may need " "to take steps to enable that functionality. " "See https://certbot.org/renewal-setup for instructions.") if not steps: return # TODO: refactor ANSI escapes during https://github.com/certbot/certbot/issues/8848 (bold_on, bold_off) = [c if sys.stdout.isatty() and not config.quiet else '' \ for c in (util.ANSI_SGR_BOLD, util.ANSI_SGR_RESET)] print(bold_on, '\n', 'NEXT STEPS:', bold_off, sep='') for step in steps: display_util.notify(f"- {step}") # If there was an installer error, segregate the error output with a trailing newline if installer_err: print()
def _report_next_steps(config: interfaces.IConfig, installer_err: Optional[errors.Error], lineage: Optional[storage.RenewableCert], new_or_renewed_cert: bool = True) -> None: """Displays post-run/certonly advice to the user about renewal and installation. The output varies by runtime configuration and any errors encountered during installation. :param config: Configuration object :type config: interfaces.IConfig :param installer_err: The installer/enhancement error encountered, if any. :type error: Optional[errors.Error] :param lineage: The resulting certificate lineage from the issuance, if any. :type lineage: Optional[storage.RenewableCert] :param bool new_or_renewed_cert: Whether the verb execution resulted in a certificate being saved (created or renewed). """ steps: List[str] = [] # If the installation or enhancement raised an error, show advice on trying again if installer_err: steps.append( "The certificate was saved, but could not be installed (installer: " f"{config.installer}). After fixing the error shown below, try installing it again " f"by running:\n {cli.cli_command} install --cert-name " f"{_cert_name_from_config_or_lineage(config, lineage)}" ) # If a certificate was obtained or renewed, show applicable renewal advice if new_or_renewed_cert: if config.csr: steps.append( "Certificates created using --csr will not be renewed automatically by Certbot. " "You will need to renew the certificate before it expires, by running the same " "Certbot command again.") elif _is_interactive_only_auth(config): steps.append( "This certificate will not be renewed automatically by Certbot. The --manual " "plugin requires the use of an authentication hook script (--manual-auth-hook) " "in order to support autorenewal, which this certificate invocation did not provide. To renew this certificate, repeat this same " f"{cli.cli_command} command, before the certificate's expiry date." ) elif not config.preconfigured_renewal: steps.append( "The certificate will need to be renewed before it expires. Certbot can " "automatically renew the certificate in the background, but you may need " "to take steps to enable that functionality. " "See https://certbot.org/renewal-setup for instructions.") if not steps: return # TODO: refactor ANSI escapes during https://github.com/certbot/certbot/issues/8848 (bold_on, bold_off) = [c if sys.stdout.isatty() and not config.quiet else '' \ for c in (util.ANSI_SGR_BOLD, util.ANSI_SGR_RESET)] print(bold_on, '\n', 'NEXT STEPS:', bold_off, sep='') for step in steps: display_util.notify(f"- {step}") # If there was an installer error, segregate the error output with a trailing newline if installer_err: print()
11,443
def resolve_tenant(default_tenant, tenant_id=None, **_): # type: (str, Optional[str], **Any) -> str """Returns the correct tenant for a token request given a credential's configuration""" if tenant_id is None: return default_tenant if (default_tenant == "adfs" or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH) ): _LOGGER.info("A token was request for a different tenant than was configured on the credential, " "but the configured value was used since multi tenant authentication has been disabled. " "Configured TenantId: {}, Requested TenantId {}".format(default_tenant, tenant_id)) return default_tenant _LOGGER.info("A token was requested for a different tenant than was configured on the credential, " "and the requested tenant id was used to authenticate. Configured TenantId: {}, " "Requested TenantId {}".format(default_tenant, tenant_id)) return tenant_id
def resolve_tenant(default_tenant, tenant_id=None, **_): # type: (str, Optional[str], **Any) -> str """Returns the correct tenant for a token request given a credential's configuration""" if tenant_id is None: return default_tenant if (default_tenant == "adfs" or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH) ): _LOGGER.info("A token was request for a different tenant than was configured on the credential, " "but the configured value was used since multi tenant authentication has been disabled. " "Configured tenant ID: {}, Requested tenant ID {}".format(default_tenant, tenant_id)) return default_tenant _LOGGER.info("A token was requested for a different tenant than was configured on the credential, " "and the requested tenant id was used to authenticate. Configured TenantId: {}, " "Requested TenantId {}".format(default_tenant, tenant_id)) return tenant_id
17,624
def create_pep_json(peps: list[parser.PEP]) -> str: return json.dumps({pep.number: pep.full_details for pep in peps}, indent=1)
def create_pep_json(peps: list[parser.PEP]) -> str: return json.dumps({pep.number: pep.to_json() for pep in peps}, indent=1)
23,606
def singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts=None, method='lambertw'): r""" Solve the single-diode model to obtain a photovoltaic IV curve. Singlediode solves the single diode equation [1]_ .. math:: I = I_L - I_0 \left[ \exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1 \right] - \frac{V + I R_s}{R_{sh}} for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` which are described later. Returns a DataFrame which contains the 5 points on the I-V curve specified in SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` are scalar, a single curve will be returned, if any are Series (of the same length), multiple IV curves will be calculated. The input parameters can be calculated using :py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data. Parameters ---------- photocurrent : numeric Light-generated current :math:`I_L` (photocurrent) under desired IV curve conditions. ``0 <= photocurrent``. [A] saturation_current : numeric Diode saturation :math:`I_0` current under desired IV curve conditions. ``0 < saturation_current``. [A] resistance_series : numeric Series resistance :math:`R_s` under desired IV curve conditions. ``0 <= resistance_series < numpy.inf``. [ohms] resistance_shunt : numeric Shunt resistance :math:`R_{sh}` under desired IV curve conditions. ``0 < resistance_shunt <= numpy.inf``. [ohms] nNsVth : numeric The product of three components. 1) The usual diode ideal factor :math:`n`, 2) the number of cells in series :math:`N_s`, and 3) the cell thermal voltage under the desired IV curve conditions :math:`V_{th}`. The thermal voltage of the cell (in volts) may be calculated as :math:`k_B T_c / q`, where :math:`k_B` is Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n junction in Kelvin, and :math:`q` is the charge of an electron (coulombs). ``0 < nNsVth``. [V] ivcurve_pnts : None or int, default None Number of points in the desired IV curve. If None or 0, no IV curves will be produced. method : str, default 'lambertw' Determines the method used to calculate points on the IV curve. The options are ``'lambertw'``, ``'newton'``, or ``'brentq'``. Returns ------- OrderedDict or DataFrame The returned dict-like object always contains the keys/columns: * i_sc - short circuit current in amperes. * v_oc - open circuit voltage in volts. * i_mp - current at maximum power point in amperes. * v_mp - voltage at maximum power point in volts. * p_mp - power at maximum power point in watts. * i_x - current, in amperes, at ``v = 0.5*v_oc``. * i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``. If ivcurve_pnts is greater than 0, the output dictionary will also include the keys: * i - IV curve current in amperes. * v - IV curve voltage in volts. The output will be an OrderedDict if photocurrent is a scalar, array, or ivcurve_pnts is not None. The output will be a DataFrame if photocurrent is a Series and ivcurve_pnts is None. Notes ----- If the method is ``'lambertw'`` then the solution employed to solve the implicit diode equation utilizes the Lambert W function to obtain an explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_. If the method is ``'newton'`` then the root-finding Newton-Raphson method is used. It should be safe for well behaved IV-curves, but the ``'brentq'`` method is recommended for reliability. 
If the method is ``'brentq'`` then Brent's bisection search method is used that guarantees convergence by bounding the voltage between zero and open-circuit. If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts`` are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to calculate the points on the IV curve points at diode voltages from zero to open-circuit voltage with a log spacing that gets closer as voltage increases. If the method is ``'lambertw'`` then the calculated points on the IV curve are linearly spaced. References ---------- .. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN 0 86758 909 4 .. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of real solar cells using Lambert W-function", Solar Energy Materials and Solar Cells, 81 (2004) 269-277. .. [3] D. King et al, "Sandia Photovoltaic Array Performance Model", SAND2004-3535, Sandia National Laboratories, Albuquerque, NM .. [4] "Computer simulation of the effects of electrical mismatches in photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988) https://doi.org/10.1016/0379-6787(88)90059-2 See also -------- sapm calcparams_desoto pvlib.singlediode.bishop88 """ # Calculate points on the IV curve using the LambertW solution to the # single diode equation if method.lower() == 'lambertw': out = _singlediode._lambertw( photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts ) i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7] if ivcurve_pnts: ivcurve_i, ivcurve_v = out[7:] else: # Calculate points on the IV curve using either 'newton' or 'brentq' # methods. Voltages are determined by first solving the single diode # equation for the diode voltage V_d then backing out voltage args = (photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth) # collect args v_oc = _singlediode.bishop88_v_from_i( 0.0, *args, method=method.lower() ) i_mp, v_mp, p_mp = _singlediode.bishop88_mpp( *args, method=method.lower() ) i_sc = _singlediode.bishop88_i_from_v( 0.0, *args, method=method.lower() ) i_x = _singlediode.bishop88_i_from_v( v_oc / 2.0, *args, method=method.lower() ) i_xx = _singlediode.bishop88_i_from_v( (v_oc + v_mp) / 2.0, *args, method=method.lower() ) # calculate the IV curve if requested using bishop88 if ivcurve_pnts: vd = v_oc * ( (11.0 - np.logspace(np.log10(11.0), 0.0, ivcurve_pnts)) / 10.0 ) ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args) out = OrderedDict() out['i_sc'] = i_sc out['v_oc'] = v_oc out['i_mp'] = i_mp out['v_mp'] = v_mp out['p_mp'] = p_mp out['i_x'] = i_x out['i_xx'] = i_xx if ivcurve_pnts: out['v'] = ivcurve_v out['i'] = ivcurve_i if isinstance(photocurrent, pd.Series) and not ivcurve_pnts: out = pd.DataFrame(out, index=photocurrent.index) return out
def singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts=None, method='lambertw'): r""" Solve the single-diode model to obtain a photovoltaic IV curve. Singlediode solves the single diode equation [1]_ .. math:: I = I_L - I_0 \left[ \exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1 \right] - \frac{V + I R_s}{R_{sh}} for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` which are described later. Returns a DataFrame which contains the 5 points on the I-V curve specified in [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and :math:`n N_s V_{th}` are scalar, a single curve will be returned, if any are Series (of the same length), multiple IV curves will be calculated. The input parameters can be calculated using :py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data. Parameters ---------- photocurrent : numeric Light-generated current :math:`I_L` (photocurrent) under desired IV curve conditions. ``0 <= photocurrent``. [A] saturation_current : numeric Diode saturation :math:`I_0` current under desired IV curve conditions. ``0 < saturation_current``. [A] resistance_series : numeric Series resistance :math:`R_s` under desired IV curve conditions. ``0 <= resistance_series < numpy.inf``. [ohms] resistance_shunt : numeric Shunt resistance :math:`R_{sh}` under desired IV curve conditions. ``0 < resistance_shunt <= numpy.inf``. [ohms] nNsVth : numeric The product of three components. 1) The usual diode ideal factor :math:`n`, 2) the number of cells in series :math:`N_s`, and 3) the cell thermal voltage under the desired IV curve conditions :math:`V_{th}`. The thermal voltage of the cell (in volts) may be calculated as :math:`k_B T_c / q`, where :math:`k_B` is Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n junction in Kelvin, and :math:`q` is the charge of an electron (coulombs). ``0 < nNsVth``. [V] ivcurve_pnts : None or int, default None Number of points in the desired IV curve. If None or 0, no IV curves will be produced. method : str, default 'lambertw' Determines the method used to calculate points on the IV curve. The options are ``'lambertw'``, ``'newton'``, or ``'brentq'``. Returns ------- OrderedDict or DataFrame The returned dict-like object always contains the keys/columns: * i_sc - short circuit current in amperes. * v_oc - open circuit voltage in volts. * i_mp - current at maximum power point in amperes. * v_mp - voltage at maximum power point in volts. * p_mp - power at maximum power point in watts. * i_x - current, in amperes, at ``v = 0.5*v_oc``. * i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``. If ivcurve_pnts is greater than 0, the output dictionary will also include the keys: * i - IV curve current in amperes. * v - IV curve voltage in volts. The output will be an OrderedDict if photocurrent is a scalar, array, or ivcurve_pnts is not None. The output will be a DataFrame if photocurrent is a Series and ivcurve_pnts is None. Notes ----- If the method is ``'lambertw'`` then the solution employed to solve the implicit diode equation utilizes the Lambert W function to obtain an explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_. If the method is ``'newton'`` then the root-finding Newton-Raphson method is used. It should be safe for well behaved IV-curves, but the ``'brentq'`` method is recommended for reliability. 
If the method is ``'brentq'`` then Brent's bisection search method is used that guarantees convergence by bounding the voltage between zero and open-circuit. If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts`` are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to calculate the points on the IV curve points at diode voltages from zero to open-circuit voltage with a log spacing that gets closer as voltage increases. If the method is ``'lambertw'`` then the calculated points on the IV curve are linearly spaced. References ---------- .. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN 0 86758 909 4 .. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of real solar cells using Lambert W-function", Solar Energy Materials and Solar Cells, 81 (2004) 269-277. .. [3] D. King et al, "Sandia Photovoltaic Array Performance Model", SAND2004-3535, Sandia National Laboratories, Albuquerque, NM .. [4] "Computer simulation of the effects of electrical mismatches in photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988) https://doi.org/10.1016/0379-6787(88)90059-2 See also -------- sapm calcparams_desoto pvlib.singlediode.bishop88 """ # Calculate points on the IV curve using the LambertW solution to the # single diode equation if method.lower() == 'lambertw': out = _singlediode._lambertw( photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, ivcurve_pnts ) i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7] if ivcurve_pnts: ivcurve_i, ivcurve_v = out[7:] else: # Calculate points on the IV curve using either 'newton' or 'brentq' # methods. Voltages are determined by first solving the single diode # equation for the diode voltage V_d then backing out voltage args = (photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth) # collect args v_oc = _singlediode.bishop88_v_from_i( 0.0, *args, method=method.lower() ) i_mp, v_mp, p_mp = _singlediode.bishop88_mpp( *args, method=method.lower() ) i_sc = _singlediode.bishop88_i_from_v( 0.0, *args, method=method.lower() ) i_x = _singlediode.bishop88_i_from_v( v_oc / 2.0, *args, method=method.lower() ) i_xx = _singlediode.bishop88_i_from_v( (v_oc + v_mp) / 2.0, *args, method=method.lower() ) # calculate the IV curve if requested using bishop88 if ivcurve_pnts: vd = v_oc * ( (11.0 - np.logspace(np.log10(11.0), 0.0, ivcurve_pnts)) / 10.0 ) ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args) out = OrderedDict() out['i_sc'] = i_sc out['v_oc'] = v_oc out['i_mp'] = i_mp out['v_mp'] = v_mp out['p_mp'] = p_mp out['i_x'] = i_x out['i_xx'] = i_xx if ivcurve_pnts: out['v'] = ivcurve_v out['i'] = ivcurve_i if isinstance(photocurrent, pd.Series) and not ivcurve_pnts: out = pd.DataFrame(out, index=photocurrent.index) return out
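Note: the docstring above describes solving the implicit single-diode equation for current and voltage. As a rough, self-contained illustration of what a bracketing solve like the ``'brentq'`` path does conceptually (this is a sketch, not pvlib's implementation), the snippet below solves the equation for current at a few terminal voltages; the cell parameters and the SciPy dependency are assumptions made for the example.

# Illustrative sketch only (not pvlib code): solve the implicit single-diode equation
#   I = IL - I0*(exp((V + I*Rs)/nNsVth) - 1) - (V + I*Rs)/Rsh
# for I at a fixed terminal voltage V, using a bracketing root finder.
import numpy as np
from scipy.optimize import brentq

def current_at_voltage(v, il=6.0, i0=1e-9, rs=0.2, rsh=300.0, nNsVth=1.5):
    def residual(i):
        return il - i0 * (np.exp((v + i * rs) / nNsVth) - 1.0) - (v + i * rs) / rsh - i
    # The physical solution lies between a slightly negative current and the photocurrent.
    return brentq(residual, -1.0, il + 1.0)

for v in (0.0, 15.0, 30.0):
    print(f"V = {v:5.1f} V  ->  I = {current_at_voltage(v):.4f} A")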
39,562
def parse_advisory(component) -> Iterable[VendorData]: response = component[0] if response["vulnerabilities"]: for vuln in response["vulnerabilities"]: aliases = [vuln["id"]] affected_versions = [] fixed_versions = [] if "versionRanges" in vuln: affected_versions.extend(vuln["versionRanges"]) yield VendorData( aliases=aliases, affected_versions=affected_versions, fixed_versions=fixed_versions, )
def parse_advisory(component) -> Iterable[VendorData]: response = component[0] if response.get("vulnerabilities"): for vuln in response["vulnerabilities"]: aliases = [vuln["id"]] affected_versions = [] fixed_versions = [] if "versionRanges" in vuln: affected_versions.extend(vuln["versionRanges"]) yield VendorData( aliases=aliases, affected_versions=affected_versions, fixed_versions=fixed_versions, )
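Note: a quick standalone sketch of why the ``.get("vulnerabilities")`` lookup in the modified version is the safer choice: a response without that key no longer raises. ``VendorData`` here is a stand-in dataclass and the payload shape is assumed for illustration, not taken from the real project.

# Minimal sketch with an assumed payload shape; VendorData is a stand-in, not the project class.
from dataclasses import dataclass, field
from typing import Iterable, List

@dataclass
class VendorData:
    aliases: List[str] = field(default_factory=list)
    affected_versions: List[str] = field(default_factory=list)
    fixed_versions: List[str] = field(default_factory=list)

def parse_advisory(component) -> Iterable[VendorData]:
    response = component[0]
    # dict.get avoids a KeyError when the API omits the "vulnerabilities" key entirely.
    for vuln in response.get("vulnerabilities") or []:
        yield VendorData(
            aliases=[vuln["id"]],
            affected_versions=list(vuln.get("versionRanges", [])),
        )

sample = [{"vulnerabilities": [{"id": "CVE-0000-0001", "versionRanges": ["<1.2.3"]}]}]
print(list(parse_advisory(sample)))
print(list(parse_advisory([{}])))  # no "vulnerabilities" key: yields nothing, no KeyError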
49,103
def test_MarginalDistribution(): a1, p1, p2 = symbols('a1 p1 p2', positive=True) C = Multinomial('C', 2, p1, p2) B = MultivariateBeta('B', a1, C[0]) MGR = MarginalDistribution(B, (C[0],)) mgrc = Mul(Symbol('B'), Piecewise(ExprCondPair(Mul(2, Pow(Symbol('p1', positive=True), Indexed(IndexedBase('C'), 0)), Pow(Symbol('p2', positive=True), Indexed(IndexedBase('C'), 1)), Pow(factorial(Indexed(IndexedBase('C'), 0)), -1), Pow(factorial(Indexed(IndexedBase('C'), 1)), -1)), Eq(Add(Indexed(IndexedBase('C'), 0), Indexed(IndexedBase('C'), 1)), 2)), ExprCondPair(0, True)), Pow(gamma(Symbol('a1', positive=True)), -1), gamma(Add(Symbol('a1', positive=True), Indexed(IndexedBase('C'), 0))), Pow(gamma(Indexed(IndexedBase('C'), 0)), -1), Pow(Indexed(IndexedBase('B'), 0), Add(Symbol('a1', positive=True), -1)), Pow(Indexed(IndexedBase('B'), 1), Add(Indexed(IndexedBase('C'), 0), -1))) assert MGR(C) == mgrc, MGR(C)
def test_MarginalDistribution(): a1, p1, p2 = symbols('a1 p1 p2', positive=True) C = Multinomial('C', 2, p1, p2) B = MultivariateBeta('B', a1, C[0]) MGR = MarginalDistribution(B, (C[0],)) mgrc = Mul(Symbol('B'), Piecewise(ExprCondPair(Mul(2, Pow(Symbol('p1', positive=True), Indexed(IndexedBase('C'), 0)), Pow(Symbol('p2', positive=True), Indexed(IndexedBase('C'), 1)), Pow(factorial(Indexed(IndexedBase('C'), 0)), -1), Pow(factorial(Indexed(IndexedBase('C'), 1)), -1)), Eq(Add(Indexed(IndexedBase('C'), 0), Indexed(IndexedBase('C'), 1)), 2)), ExprCondPair(0, True)), Pow(gamma(Symbol('a1', positive=True)), -1), gamma(Add(Symbol('a1', positive=True), Indexed(IndexedBase('C'), 0))), Pow(gamma(Indexed(IndexedBase('C'), 0)), -1), Pow(Indexed(IndexedBase('B'), 0), Add(Symbol('a1', positive=True), -1)), Pow(Indexed(IndexedBase('B'), 1), Add(Indexed(IndexedBase('C'), 0), -1))) from sympy import srepr assert MGR(C) == mgrc, ('\n',srepr(mgrc),'\n',srepr(MGR(C)))
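Note: the modified test adds ``srepr`` output to the assertion message. A minimal side example of what that buys during debugging, assuming SymPy is installed: ``srepr`` prints the full expression tree, so two objects that print identically with ``str`` can still be told apart.

# srepr exposes structural differences that str() hides.
from sympy import Symbol, srepr

x_pos = Symbol('x', positive=True)
x_plain = Symbol('x')
print(str(x_pos) == str(x_plain))  # True: the printed forms look identical
print(srepr(x_pos))                # Symbol('x', positive=True)
print(srepr(x_plain))              # Symbol('x')
assert x_pos != x_plain, (srepr(x_pos), srepr(x_plain))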
20,642
def _get_scope_hierarchy(): """ Returns: scope hierarchy (dict): dictionary of available scopes and their hierarchy where scopes.keys() = top level scopes and scopes that have their own subscopes scopes.values() = list of immediate subscopes or None """ subscope_lists = [ value['subscopes'] for value in scopes.scope_definitions.values() if 'subscopes' in value ] scope_hierarchy = {} for scope in scopes.scope_definitions.keys(): has_subscopes = scopes.scope_definitions[scope].get('subscopes') is_subscope = any(scope in subscope_list for subscope_list in subscope_lists) if has_subscopes: scope_hierarchy[scope] = scopes.scope_definitions[scope]['subscopes'] else: if not is_subscope: scope_hierarchy[scope] = None return scope_hierarchy
def _get_scope_hierarchy(): """ Returns: scope hierarchy (dict): dictionary of available scopes and their hierarchy where scopes.keys() = top level scopes and scopes that have their own subscopes scopes.values() = list of immediate subscopes or None """ subscope_lists = [ value['subscopes'] for value in scopes.scope_definitions.values() if 'subscopes' in value ] scope_hierarchy = {} for scope, definition in scopes.scope_definitions.items(): has_subscopes = scopes.scope_definitions[scope].get('subscopes') is_subscope = any(scope in subscope_list for subscope_list in subscope_lists) if has_subscopes: scope_hierarchy[scope] = scopes.scope_definitions[scope]['subscopes'] else: if not is_subscope: scope_hierarchy[scope] = None return scope_hierarchy
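Note: a toy, self-contained version of the hierarchy derivation, using a made-up ``scope_definitions`` mapping (the real definitions live in the project's ``scopes`` module): top-level scopes keep their subscope list, plain subscopes are dropped, and scopes that are neither map to None.

# Toy illustration with an assumed scope_definitions mapping.
scope_definitions = {
    "facility": {"subscopes": ["classroom", "learner"]},
    "classroom": {},
    "learner": {},
    "standalone": {},
}

def get_scope_hierarchy(definitions):
    subscope_lists = [v["subscopes"] for v in definitions.values() if "subscopes" in v]
    hierarchy = {}
    for scope, definition in definitions.items():
        if definition.get("subscopes"):
            hierarchy[scope] = definition["subscopes"]
        elif not any(scope in subs for subs in subscope_lists):
            hierarchy[scope] = None
    return hierarchy

print(get_scope_hierarchy(scope_definitions))
# {'facility': ['classroom', 'learner'], 'standalone': None}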
50,665
def check_for_errors(component: icalendar.cal.Component, calendar: str, href: str): """checking if component.errors exists, is not empty and if so warn the user""" if hasattr(component, 'errors') and component.errors: logger.error( 'Errors occurred when parsing {}/{} for the following ' 'reasons:'.format(calendar, href)) for error in component.errors: logger.error(error) logger.error('This might lead to this event being shown wrongly or not at all.')
def check_for_errors(component: icalendar.cal.Component, calendar: str, href: str): """checking if component.errors exists, is not empty and if so warn the user""" if hasattr(component, 'errors') and component.errors: logger.error( f'Errors occurred when parsing {calendar}/{href} for ' 'the following reasons:') for error in component.errors: logger.error(error) logger.error('This might lead to this event being shown wrongly or not at all.')
40,103
def get_update(request_parameters: ImmutableMultiDict) -> list: ''' Parse the update parameter from request parameters. Update is a list of analysis plugins that shall be updated. :param request_parameters: dict containing the request parameters. :return: The list of analysis plugins. ''' try: update = json.loads(request_parameters.get('update')) except (AttributeError, KeyError, TypeError): raise ValueError('Malformed or missing parameter: update') except json.JSONDecodeError: raise ValueError('Update parameter has to be a list') if not isinstance(update, list): raise ValueError('Update must be a list') if not update: raise ValueError('Update has to be specified') return update
def get_update(request_parameters: ImmutableMultiDict) -> list: ''' Parse the update parameter from request parameters. Update is a list of analysis plugins whose analysis results shall be updated. :param request_parameters: dict containing the request parameters. :return: The list of analysis plugins. ''' try: update = json.loads(request_parameters.get('update')) except (AttributeError, KeyError, TypeError): raise ValueError('Malformed or missing parameter: update') except json.JSONDecodeError: raise ValueError('Update parameter has to be a list') if not isinstance(update, list): raise ValueError('Update must be a list') if not update: raise ValueError('Update has to be specified') return update
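Note: a standalone sketch of the same validation flow, using a plain dict in place of Werkzeug's ImmutableMultiDict so it runs without Flask; the plugin names in the example are made up.

# Same validation order as above: parse JSON, then require a non-empty list.
import json

def get_update(request_parameters: dict) -> list:
    try:
        update = json.loads(request_parameters.get('update'))
    except (AttributeError, KeyError, TypeError):
        raise ValueError('Malformed or missing parameter: update')
    except json.JSONDecodeError:
        raise ValueError('Update parameter has to be a list')
    if not isinstance(update, list):
        raise ValueError('Update must be a list')
    if not update:
        raise ValueError('Update has to be specified')
    return update

print(get_update({'update': '["file_type", "crypto_material"]'}))
for bad in ({}, {'update': 'not json'}, {'update': '{}'}, {'update': '[]'}):
    try:
        get_update(bad)
    except ValueError as err:
        print(f'{bad!r}: {err}')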
42,742
def evm_address_to_identifier( address: str, chain: ChainID, token_type: EvmTokenKind, collectible_id: Optional[str] = None, ) -> str: """Format an EVM token information into the CAIPs identifier format""" ident = f'{EVM_CHAIN_DIRECTIVE}:{chain.value}/{str(token_type)}:{address}' if collectible_id is not None: return ident + f'/{collectible_id}' return ident
def evm_address_to_identifier( address: str, chain: ChainID, token_type: EvmTokenKind, collectible_id: Optional[str] = None, ) -> str: """Forma EVM token information into the CAIPs identifier format""" ident = f'{EVM_CHAIN_DIRECTIVE}:{chain.value}/{str(token_type)}:{address}' if collectible_id is not None: return ident + f'/{collectible_id}' return ident
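Note: a self-contained sketch of the identifier format, with minimal stand-in enums for ChainID and EvmTokenKind; the 'eip155' directive and the example address are assumptions for illustration, not values taken from the project's constants.

# Sketch only: CAIP-style asset identifier built from stand-in types.
from enum import Enum

EVM_CHAIN_DIRECTIVE = 'eip155'  # assumed directive prefix

class ChainID(Enum):
    ETHEREUM = 1
    POLYGON = 137

class EvmTokenKind(Enum):
    ERC20 = 'erc20'
    ERC721 = 'erc721'

    def __str__(self):
        return self.value

def evm_address_to_identifier(address, chain, token_type, collectible_id=None):
    ident = f'{EVM_CHAIN_DIRECTIVE}:{chain.value}/{token_type}:{address}'
    return ident if collectible_id is None else f'{ident}/{collectible_id}'

print(evm_address_to_identifier('0xdAC17F958D2ee523a2206206994597C13D831ec7',
                                ChainID.ETHEREUM, EvmTokenKind.ERC20))
# eip155:1/erc20:0xdAC17F958D2ee523a2206206994597C13D831ec7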
23,807
def _validate_fpic(conanfile): v2_mode = get_env(CONAN_V2_MODE_ENVVAR, False) # FIXME: The toolchain() has dissapeared, but now it is integrated with generators. # FIXME: Not sure this check should be here, this could raise before the graph is fully evaluated toolchain = hasattr(conanfile, "toolchain") if not (toolchain or v2_mode): return fpic = conanfile.options.get_safe("fPIC") if fpic is None: return os_ = conanfile.settings.get_safe("os") if os_ and "Windows" in os_: if v2_mode: raise ConanInvalidConfiguration("fPIC option defined for Windows") conanfile.output.error("fPIC option defined for Windows") return shared = conanfile.options.get_safe("shared") if shared: if v2_mode: raise ConanInvalidConfiguration("fPIC option defined for a shared library") conanfile.output.error("fPIC option defined for a shared library")
def _validate_fpic(conanfile): v2_mode = get_env(CONAN_V2_MODE_ENVVAR, False) # FIXME: The toolchain() has dissapeared, but now it is integrated with generators. # FIXME: Not sure this check should be here, this could raise before the graph is fully evaluated toolchain = hasattr(conanfile, "toolchain") or hasattr(conanfile, "generate") if not (toolchain or v2_mode): return fpic = conanfile.options.get_safe("fPIC") if fpic is None: return os_ = conanfile.settings.get_safe("os") if os_ and "Windows" in os_: if v2_mode: raise ConanInvalidConfiguration("fPIC option defined for Windows") conanfile.output.error("fPIC option defined for Windows") return shared = conanfile.options.get_safe("shared") if shared: if v2_mode: raise ConanInvalidConfiguration("fPIC option defined for a shared library") conanfile.output.error("fPIC option defined for a shared library")
32,009
def get_collections(client: PrismaCloudComputeClient, args: dict) -> CommandResults: """ Get collections information, implement the 'command prisma-cloud-compute-collections-list' Args: client (PrismaCloudComputeClient): prisma-cloud-compute client. args (dict): prisma-cloud-compute-collections-list command arguments Returns: CommandResults: command-results object. """ limit, _ = parse_limit_and_offset_values(limit=args.get("limit", "50")) if collections := filter_api_response(api_response=client.get_collections(), limit=limit): for collection in collections: if "modified" in collection: collection["modified"] = parse_date_string_format(date_string=collection.get("modified")) table = tableToMarkdown( name="Collections Information", t=collections, headers=["name", "description", "owner", "modified"], removeNull=True, headerTransform=lambda word: word[0].upper() + word[1:] ) else: collections, table = [], "No results found" return CommandResults( outputs_prefix="PrismaCloudCompute.Collection", outputs_key_field=["name", "owner", "description"], outputs=collections if collections else None, readable_output=table, raw_response=collections )
def get_collections(client: PrismaCloudComputeClient, args: dict) -> CommandResults: """ Get collections information, implement the 'command prisma-cloud-compute-collections-list' Args: client (PrismaCloudComputeClient): prisma-cloud-compute client. args (dict): prisma-cloud-compute-collections-list command arguments Returns: CommandResults: command-results object. """ limit, _ = parse_limit_and_offset_values(limit=args.get("limit", "50")) if collections := filter_api_response(api_response=client.get_collections(), limit=limit): for collection in collections: if "modified" in collection: collection["modified"] = parse_date_string_format(date_string=collection.get("modified")) table = tableToMarkdown( name="Collections Information", t=collections, headers=["name", "description", "owner", "modified"], removeNull=True, headerTransform=lambda word: word[0].upper() + word[1:] ) else: collections, table = [], "No results found." return CommandResults( outputs_prefix="PrismaCloudCompute.Collection", outputs_key_field=["name", "owner", "description"], outputs=collections if collections else None, readable_output=table, raw_response=collections )
54,318
def check_wavelength_range(verbose=True, warnings=True, *args, **kwargs): if verbose: printm("Testing calc_spectrum wavelength range") s = calc_spectrum( wavelength_min=4348, # nm wavelength_max=5000, molecule="CO", isotope="1,2,3", pressure=1.01325, # bar Tvib=1700, # K Trot=1700, # K databank="HITRAN-CO-TEST", wstep=0.01, ) w, I = s.get("radiance_noslit", wunit="nm", Iunit="mW/sr/cm2/nm") assert np.isclose(w.min(), 4348, atol=0.01) assert np.isclose(w.max(), 5000, atol=0.01) return True
def check_wavelength_range(verbose=True, warnings=True, *args, **kwargs): """Check that input wavelength is correctly taken into account. See https://github.com/radis/radis/issues/214 """ if verbose: printm("Testing calc_spectrum wavelength range") s = calc_spectrum( wavelength_min=4348, # nm wavelength_max=5000, molecule="CO", isotope="1,2,3", pressure=1.01325, # bar Tvib=1700, # K Trot=1700, # K databank="HITRAN-CO-TEST", wstep=0.01, ) w, I = s.get("radiance_noslit", wunit="nm", Iunit="mW/sr/cm2/nm") assert np.isclose(w.min(), 4348, atol=0.01) assert np.isclose(w.max(), 5000, atol=0.01) return True
42,417
def crop_image(raster, geoms, all_touched=True): """Crop a single file using geometry objects. Parameters ---------- raster : rasterio.io.DatasetReader object The rasterio object to be cropped. geoms : geopandas geodataframe or list of polygons The spatial polygon boundaries in GeoJSON-like dict format to be used to crop the image. All data outside of the polygon boundaries will be set to nodata and/or removed from the image. all_touched : bool (default=True) Include a pixel in the mask if it touches any of the shapes. If False, include a pixel only if its center is within one of the shapes, or if it is selected by Bresenham's line algorithm. (from rasterio) Returns ---------- tuple out_image: cropped numpy array A numpy array that is cropped to the geoms object extent with shape (bands, rows, columns) out_meta: dict A dictionary containing updated metadata for the cropped raster, including extent (shape elements) and transform properties. Example ------- >>> import geopandas as gpd >>> import rasterio as rio >>> import earthpy.spatial as es >>> from earthpy.io import path_to_example >>> # Clip an RGB image to the extent of Rocky Mountain National Park >>> rmnp = gpd.read_file(path_to_example("rmnp.shp")) >>> with rio.open(path_to_example("rmnp-rgb.tif")) as src: ... in_image = src.read() ... out_image, out_meta = es.crop_image(src, rmnp) >>> in_image.shape (3, 373, 485) >>> out_image.shape (3, 265, 281) """ if isinstance(geoms, gpd.geodataframe.GeoDataFrame): clip_extent = [extent_to_json(geoms)] else: clip_extent = geoms out_image, out_transform = mask( raster, clip_extent, crop=True, all_touched=all_touched ) out_meta = raster.meta.copy() out_meta.update( { "driver": "GTiff", "height": out_image.shape[1], "width": out_image.shape[2], "transform": out_transform, } ) return out_image, out_meta
def crop_image(raster, geoms, all_touched=True): """Crop a single file using geometry objects. Parameters ---------- raster : rasterio.io.DatasetReader object The rasterio object to be cropped. geoms : geopandas geodataframe or list of polygons The spatial polygon boundaries in GeoJSON-like dict format to be used to crop the image. All data outside of the polygon boundaries will be set to nodata and/or removed from the image. all_touched : bool (default=True) Include a pixel in the mask if it touches any of the shapes. If False, include a pixel only if its center is within one of the shapes, or if it is selected by Bresenham's line algorithm. (from rasterio) Returns ---------- tuple out_image: cropped numpy array A numpy array that is cropped to the geoms object extent with shape (bands, rows, columns) out_meta: dict A dictionary containing updated metadata for the cropped raster, including extent (shape elements) and transform properties. Example ------- >>> import geopandas as gpd >>> import rasterio as rio >>> import earthpy.spatial as es >>> from earthpy.io import path_to_example >>> # Clip an RGB image to the extent of Rocky Mountain National Park >>> rmnp = gpd.read_file(path_to_example("rmnp.shp")) >>> with rio.open(path_to_example("rmnp-rgb.tif")) as src: ... in_image = src.read() ... cropped_raster, cropped_meta = es.crop_image(src_raster, rmnp) >>> in_image.shape (3, 373, 485) >>> out_image.shape (3, 265, 281) """ if isinstance(geoms, gpd.geodataframe.GeoDataFrame): clip_extent = [extent_to_json(geoms)] else: clip_extent = geoms out_image, out_transform = mask( raster, clip_extent, crop=True, all_touched=all_touched ) out_meta = raster.meta.copy() out_meta.update( { "driver": "GTiff", "height": out_image.shape[1], "width": out_image.shape[2], "transform": out_transform, } ) return out_image, out_meta
33,722
def start(detached: bool = False, http_host: str = DEFAULT_HTTP_HOST, http_port: int = DEFAULT_HTTP_PORT, http_middlewares: List[Any] = []) -> Client: """Initialize a serve instance. By default, the instance will be scoped to the lifetime of the returned Client object (or when the script exits). If detached is set to True, the instance will instead persist until client.shutdown() is called and clients to it can be connected using serve.connect(). This is only relevant if connecting to a long-running Ray cluster (e.g., with address="auto"). Args: detached (bool): Whether not the instance should be detached from this script. http_host (str): Host for HTTP servers to listen on. Defaults to "127.0.0.1". To expose Serve publicly, you probably want to set this to "0.0.0.0". One HTTP server will be started on each node in the Ray cluster. http_port (int): Port for HTTP server. Defaults to 8000. http_middleswares (list): A list of Starlette middlewares that will be applied to the HTTP servers in the cluster. """ # Initialize ray if needed. if not ray.is_initialized(): if detached: # Try to connect to long-running Ray cluster. Error if we can't ray.init(address=os.getenv("RAY_ADDRESS", "auto")) else: ray.init() # Try to get serve controller if it exists if detached: controller_name = SERVE_CONTROLLER_NAME try: ray.get_actor(controller_name) raise RayServeException("Called serve.start(detached=True) but a " "detached instance is already running. " "Please use serve.connect() to connect to " "the running instance instead.") except ValueError: pass else: controller_name = format_actor_name(SERVE_CONTROLLER_NAME, get_random_letters()) controller = ServeController.options( name=controller_name, lifetime="detached" if detached else None, max_restarts=-1, max_task_retries=-1, ).remote( controller_name, http_host, http_port, http_middlewares, detached=detached) futures = [] for node_id in ray.state.node_ids(): future = block_until_http_ready.options( num_cpus=0, resources={ node_id: 0.01 }).remote( "http://{}:{}/-/routes".format(http_host, http_port), timeout=HTTP_PROXY_TIMEOUT) futures.append(future) ray.get(futures) return Client(controller, controller_name, detached=detached)
def start(detached: bool = False, http_host: str = DEFAULT_HTTP_HOST, http_port: int = DEFAULT_HTTP_PORT, http_middlewares: List[Any] = []) -> Client: """Initialize a serve instance. By default, the instance will be scoped to the lifetime of the returned Client object (or when the script exits). If detached is set to True, the instance will instead persist until client.shutdown() is called and clients to it can be connected using serve.connect(). This is only relevant if connecting to a long-running Ray cluster (e.g., with address="auto"). Args: detached (bool): Whether not the instance should be detached from this script. http_host (str): Host for HTTP servers to listen on. Defaults to "127.0.0.1". To expose Serve publicly, you probably want to set this to "0.0.0.0". One HTTP server will be started on each node in the Ray cluster. http_port (int): Port for HTTP server. Defaults to 8000. http_middleswares (list): A list of Starlette middlewares that will be applied to the HTTP servers in the cluster. """ # Initialize ray if needed. if not ray.is_initialized(): if detached: # Try to connect to existing Ray cluster, error if we can't. ray.init(address=os.getenv("RAY_ADDRESS", "auto")) else: ray.init() # Try to get serve controller if it exists if detached: controller_name = SERVE_CONTROLLER_NAME try: ray.get_actor(controller_name) raise RayServeException("Called serve.start(detached=True) but a " "detached instance is already running. " "Please use serve.connect() to connect to " "the running instance instead.") except ValueError: pass else: controller_name = format_actor_name(SERVE_CONTROLLER_NAME, get_random_letters()) controller = ServeController.options( name=controller_name, lifetime="detached" if detached else None, max_restarts=-1, max_task_retries=-1, ).remote( controller_name, http_host, http_port, http_middlewares, detached=detached) futures = [] for node_id in ray.state.node_ids(): future = block_until_http_ready.options( num_cpus=0, resources={ node_id: 0.01 }).remote( "http://{}:{}/-/routes".format(http_host, http_port), timeout=HTTP_PROXY_TIMEOUT) futures.append(future) ray.get(futures) return Client(controller, controller_name, detached=detached)
41,631
def venv_fullname(base_name: str, python_minor: str = None) -> str: if python_minor is None: python_minor = ".".join(str(c) for c in sys.version_info[:2]) return "{}-py{}".format(base_name, python_minor)
def venv_fullname(base_name: str, python_minor: str = None) -> str: if python_minor is None: python_minor = ".".join(str(c) for c in sys.version_info[:2]) return f"{base_name}-py{python_minor}"
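Note: the helper is small enough to rerun verbatim as a usage check; the base name is arbitrary and the first output depends on the interpreter running it.

# Usage illustration of the naming scheme above.
import sys

def venv_fullname(base_name: str, python_minor: str = None) -> str:
    if python_minor is None:
        python_minor = ".".join(str(c) for c in sys.version_info[:2])
    return f"{base_name}-py{python_minor}"

print(venv_fullname("myproject"))         # e.g. "myproject-py3.11" on a 3.11 interpreter
print(venv_fullname("myproject", "3.8"))  # "myproject-py3.8"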
45,657
def layout(): return html.Div(id='oncoprint-body', children=[ dash_bio.OncoPrint( id='oncoprint-chart', height=550, data=[] ), html.Div(id='oncoprint-control-tabs', children=[ dcc.Tabs( id='oncoprint-tabs', children=[ dcc.Tab( label='About', value='what-is', children=html.Div(className='oncoprint-tab', children=[ html.H4( "What is OncoPrint?" ), html.P( """ The OncoPrint component is used to view multiple genetic alteration events through an interactive and zoomable heatmap. It is a React/Dash port of the popular oncoPrint() function from the BioConductor R package. Under the hood, the rendering is done using Plotly.js built upon D3. Plotly's interactivity allows the user to bind clicks and hovers to genetic events, allowing the user to create complex bioinformatic apps or workflows that rely on crossfiltering. """ ), html.P( """ Read more about the component here: https://github.com/plotly/react-oncoprint """ ) ]) ), dcc.Tab( label='Data', value='data', children=html.Div(className='oncoprint-tab', children=[ html.Div([ html.Div( className='oncoprint-option-name', children='Select dataset' ), dcc.Dropdown( id='oncoprint-dropdown', className='oncoprint-select', options=[ { 'label': '{}.json'.format(ds), 'value': ds } for ds in DATASETS ], value='cBioPortalData', ), ]), html.Hr( className='oncoprint-separator' ), html.Div([ html.H4('Hover, click, or event data'), html.Div( id='oncoprint-events' ), ]) ]) ), dcc.Tab( label='View', value='view', children=html.Div(className='oncoprint-tab', children=[ html.H4('Layout'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Overview' ), daq.ToggleSwitch( id='oncoprint-show-overview', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Legend' ), daq.ToggleSwitch( id='oncoprint-show-legend', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Padding' ), dcc.Slider( className='oncoprint-slider', id='oncoprint-padding-input', value=0.05, min=0, max=0.1, step=0.01, marks={ '0': '0', '0.02': '0.02', '0.04': '0.04', '0.06': '0.06', '0.08': '0.08', '0.1': '0.1', }, ), html.Br(), html.Div( 'Adjust the padding (as percentage) ' 'between two tracks.' ), ], ), html.Hr(className='oncoprint-separator'), html.Div([ html.H4('Colors'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Track color' ), html.P( 'Change the default background ' 'color for the tracks.' ), daq.ColorPicker( id='oncoprint-tracks-color', value={'hex': '#AAAAAA'} ), ], ), html.Hr(className='oncoprint-separator'), html.H6("Mutation colors"), html.P( "Select a mutation type and a color " "to customize its look." ), html.Div(children=[ html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation type' ), dcc.Dropdown( id='oncoprint-colorscale-mutation-dropdown', options=[ {'label': mut_type, 'value': mut_type} for mut_type in COLORSCALE_MUTATIONS_OPT ], value=COLORSCALE_MUTATIONS_OPT[0], ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation color' ), daq.ColorPicker( id='oncoprint-mutation-color', value={'hex': COLORSCALE_COLORS_OPT[0]} ) ], ), ]) ]) ]) ) ] ) ]), dcc.Store(id='oncoprint-store'), ]),
def layout(): return html.Div(id='oncoprint-body', children=[ dash_bio.OncoPrint( id='oncoprint-chart', height=550, data=[] ), html.Div(id='oncoprint-control-tabs', children=[ dcc.Tabs( id='oncoprint-tabs', children=[ dcc.Tab( label='About', value='what-is', children=html.Div(className='oncoprint-tab', children=[ html.H4( "What is OncoPrint?" ), html.P( """ The OncoPrint component is used to view multiple genetic alteration events through an interactive and zoomable heatmap. It is a React/Dash port of the popular oncoPrint() function from the Bioconductor R package. Under the hood, the rendering is done using Plotly.js built upon D3. Plotly's interactivity allows the user to bind clicks and hovers to genetic events, allowing the user to create complex bioinformatic apps or workflows that rely on crossfiltering. """ ), html.P( """ Read more about the component here: https://github.com/plotly/react-oncoprint """ ) ]) ), dcc.Tab( label='Data', value='data', children=html.Div(className='oncoprint-tab', children=[ html.Div([ html.Div( className='oncoprint-option-name', children='Select dataset' ), dcc.Dropdown( id='oncoprint-dropdown', className='oncoprint-select', options=[ { 'label': '{}.json'.format(ds), 'value': ds } for ds in DATASETS ], value='cBioPortalData', ), ]), html.Hr( className='oncoprint-separator' ), html.Div([ html.H4('Hover, click, or event data'), html.Div( id='oncoprint-events' ), ]) ]) ), dcc.Tab( label='View', value='view', children=html.Div(className='oncoprint-tab', children=[ html.H4('Layout'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Overview' ), daq.ToggleSwitch( id='oncoprint-show-overview', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Legend' ), daq.ToggleSwitch( id='oncoprint-show-legend', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Padding' ), dcc.Slider( className='oncoprint-slider', id='oncoprint-padding-input', value=0.05, min=0, max=0.1, step=0.01, marks={ '0': '0', '0.02': '0.02', '0.04': '0.04', '0.06': '0.06', '0.08': '0.08', '0.1': '0.1', }, ), html.Br(), html.Div( 'Adjust the padding (as percentage) ' 'between two tracks.' ), ], ), html.Hr(className='oncoprint-separator'), html.Div([ html.H4('Colors'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Track color' ), html.P( 'Change the default background ' 'color for the tracks.' ), daq.ColorPicker( id='oncoprint-tracks-color', value={'hex': '#AAAAAA'} ), ], ), html.Hr(className='oncoprint-separator'), html.H6("Mutation colors"), html.P( "Select a mutation type and a color " "to customize its look." ), html.Div(children=[ html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation type' ), dcc.Dropdown( id='oncoprint-colorscale-mutation-dropdown', options=[ {'label': mut_type, 'value': mut_type} for mut_type in COLORSCALE_MUTATIONS_OPT ], value=COLORSCALE_MUTATIONS_OPT[0], ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation color' ), daq.ColorPicker( id='oncoprint-mutation-color', value={'hex': COLORSCALE_COLORS_OPT[0]} ) ], ), ]) ]) ]) ) ] ) ]), dcc.Store(id='oncoprint-store'), ]),
2,005
def _weighted_percentile(array, sample_weight, percentile=50, interpolation="nearest"): """Compute weighted percentile Computes lower weighted percentile. If `array` is a 2D array, the `percentile` is computed along the axis 0. .. versionchanged:: 0.24 Accepts 2D `array`. Parameters ---------- array : ndarray of shape (n,) or (n, m) Values to take the weighted percentile of. sample_weight: ndarray of (n,) or (n, m) Weights for each value in `array`. Must be same shape as `array` or of shape `(array.shape[0],)`. percentile: inr or float, default=50 Percentile to compute. Must be value between 0 and 100. interpolation : {"linear", "lower", "higher", "nearest"}, default="lower" The interpolation method to use when the percentile lies between data points `i` and `j`: * `"linear"`: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`; * `"lower"`: i`; * `"higher"`: `j`; * `"nearest"`: `i` or `j`, whichever is nearest (default). .. versionadded: 0.24 Returns ------- percentile_value : float or int if `array` of shape (n,), otherwise\ ndarray of shape (m,) Weighted percentile. """ possible_interpolation = ("linear", "lower", "higher", "nearest") if interpolation not in possible_interpolation: raise ValueError( f"'interpolation' should be one of " f"{', '.join(possible_interpolation)}. Got '{interpolation}' " f"instead." ) if np.any(np.count_nonzero(sample_weight, axis=0) < 1): raise ValueError( "All weights cannot be null when computing a weighted percentile." ) n_dim = array.ndim if n_dim == 0: return array[()] if array.ndim == 1: array = array.reshape((-1, 1)) if (array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]): # when `sample_weight` is 1D, we repeat it for each column of `array` sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T n_rows, n_cols = array.shape sorted_idx = np.argsort(array, axis=0) sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0) percentile = np.array([percentile / 100] * n_cols) cum_weigths = stable_cumsum(sorted_weights, axis=0) def _squeeze_arr(arr, n_dim): return arr[0] if n_dim == 1 else arr # Percentile can be computed with 3 different alternative: # https://en.wikipedia.org/wiki/Percentile # These 3 alternatives depend of the value of a parameter C. NumPy uses # the variant where C=0 which allows to obtained a strictly monotically # increasing function which is defined as: # P = (x - 1) / (N - 1); x in [1, N] # Weighted percentile change this formula by taking into account the # weights instead of the data frequency. # P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being # the sum of the weights. 
adjusted_percentile = (cum_weigths - sorted_weights) with np.errstate(invalid="ignore"): adjusted_percentile /= cum_weigths[-1] - sorted_weights nan_mask = np.isnan(adjusted_percentile) adjusted_percentile[nan_mask] = 1 if interpolation in ("lower", "higher", "nearest"): percentile_idx = np.array([ np.searchsorted(adjusted_percentile[:, col], percentile[col], side="left") for col in range(n_cols) ]) if interpolation == "lower" and np.all(percentile < 1): # P = 100 is a corner case for "lower" percentile_idx -= 1 elif interpolation == "nearest" and np.all(percentile < 1): for col in range(n_cols): error_higher = abs( adjusted_percentile[percentile_idx[col], col] - percentile[col] ) error_lower = abs( adjusted_percentile[percentile_idx[col] - 1, col] - percentile[col] ) if error_higher >= error_lower: percentile_idx[col] -= 1 percentile_idx = np.apply_along_axis( lambda x: np.clip(x, 0, n_rows - 1), axis=0, arr=percentile_idx ) percentile_value = array[ sorted_idx[percentile_idx, np.arange(n_cols)], np.arange(n_cols) ] percentile_value = _squeeze_arr(percentile_value, n_dim) else: # interpolation == "linear" percentile_value = np.array([ np.interp( x=percentile[col], xp=adjusted_percentile[:, col], fp=array[sorted_idx[:, col], col], ) for col in range(n_cols) ]) percentile_value = _squeeze_arr(percentile_value, n_dim) single_sample_weight = np.count_nonzero(sample_weight, axis=0) if np.any(single_sample_weight == 1): # edge case where a single weight is non-null in which case the # previous methods will fail if not isinstance(percentile_value, Iterable): percentile_value = _squeeze_arr( array[np.nonzero(sample_weight)], n_dim ) else: percentile_value = np.array([ array[np.flatnonzero(sample_weight[:, col])[0], col] if n_nonzero == 1 else percentile_value[col] for col, n_nonzero in enumerate(single_sample_weight) ]) return percentile_value
def _weighted_percentile(array, sample_weight, percentile=50, interpolation="nearest"): """Compute weighted percentile Computes lower weighted percentile. If `array` is a 2D array, the `percentile` is computed along the axis 0. .. versionchanged:: 0.24 Accepts 2D `array`. Parameters ---------- array : ndarray of shape (n,) or (n, m) Values to take the weighted percentile of. sample_weight: ndarray of (n,) or (n, m) Weights for each value in `array`. Must be same shape as `array` or of shape `(array.shape[0],)`. percentile: inr or float, default=50 Percentile to compute. Must be value between 0 and 100. interpolation : {"linear", "lower", "higher", "nearest"}, default="lower" The interpolation method to use when the percentile lies between data points `i` and `j`: * `"linear"`: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`; * `"lower"`: i`; * `"higher"`: `j`; * `"nearest"`: `i` or `j`, whichever is nearest (default). .. versionadded: 0.24 Returns ------- percentile_value : float or int if `array` of shape (n,), otherwise\ ndarray of shape (m,) Weighted percentile. """ possible_interpolation = ("linear", "lower", "higher", "nearest") if interpolation not in possible_interpolation: raise ValueError( f"'interpolation' should be one of " f"{', '.join(possible_interpolation)}. Got '{interpolation}' " f"instead." ) if np.any(np.count_nonzero(sample_weight, axis=0) < 1): raise ValueError( "All weights cannot be null when computing a weighted percentile." ) n_dim = array.ndim if n_dim == 0: return array[()] if array.ndim == 1: array = array.reshape((-1, 1)) if (array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]): # when `sample_weight` is 1D, we repeat it for each column of `array` sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T n_rows, n_cols = array.shape sorted_idx = np.argsort(array, axis=0) sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0) percentile = np.array([percentile / 100] * n_cols) cum_weigths = stable_cumsum(sorted_weights, axis=0) def _squeeze_arr(arr, n_dim): return arr[0] if n_dim == 1 else arr # Percentile can be computed with 3 different alternative: # https://en.wikipedia.org/wiki/Percentile # These 3 alternatives depend of the value of a parameter C. NumPy uses # the variant where C=0 which allows to obtain a strictly monotonically # increasing function which is defined as: # P = (x - 1) / (N - 1); x in [1, N] # Weighted percentile change this formula by taking into account the # weights instead of the data frequency. # P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being # the sum of the weights. 
adjusted_percentile = (cum_weigths - sorted_weights) with np.errstate(invalid="ignore"): adjusted_percentile /= cum_weigths[-1] - sorted_weights nan_mask = np.isnan(adjusted_percentile) adjusted_percentile[nan_mask] = 1 if interpolation in ("lower", "higher", "nearest"): percentile_idx = np.array([ np.searchsorted(adjusted_percentile[:, col], percentile[col], side="left") for col in range(n_cols) ]) if interpolation == "lower" and np.all(percentile < 1): # P = 100 is a corner case for "lower" percentile_idx -= 1 elif interpolation == "nearest" and np.all(percentile < 1): for col in range(n_cols): error_higher = abs( adjusted_percentile[percentile_idx[col], col] - percentile[col] ) error_lower = abs( adjusted_percentile[percentile_idx[col] - 1, col] - percentile[col] ) if error_higher >= error_lower: percentile_idx[col] -= 1 percentile_idx = np.apply_along_axis( lambda x: np.clip(x, 0, n_rows - 1), axis=0, arr=percentile_idx ) percentile_value = array[ sorted_idx[percentile_idx, np.arange(n_cols)], np.arange(n_cols) ] percentile_value = _squeeze_arr(percentile_value, n_dim) else: # interpolation == "linear" percentile_value = np.array([ np.interp( x=percentile[col], xp=adjusted_percentile[:, col], fp=array[sorted_idx[:, col], col], ) for col in range(n_cols) ]) percentile_value = _squeeze_arr(percentile_value, n_dim) single_sample_weight = np.count_nonzero(sample_weight, axis=0) if np.any(single_sample_weight == 1): # edge case where a single weight is non-null in which case the # previous methods will fail if not isinstance(percentile_value, Iterable): percentile_value = _squeeze_arr( array[np.nonzero(sample_weight)], n_dim ) else: percentile_value = np.array([ array[np.flatnonzero(sample_weight[:, col])[0], col] if n_nonzero == 1 else percentile_value[col] for col, n_nonzero in enumerate(single_sample_weight) ]) return percentile_value
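Note: a much-reduced 1-D illustration of the weighting idea described in the comments above (a sketch of the principle, not scikit-learn's helper): sort the values, place each one on the [0, 1] axis according to its adjusted cumulative weight, and return the first value that reaches the requested fraction.

# Simplified 1-D weighted percentile sketch.
import numpy as np

def weighted_percentile_1d(values, weights, percentile=50):
    order = np.argsort(values)
    values, weights = np.asarray(values)[order], np.asarray(weights)[order]
    cum_weights = np.cumsum(weights)
    # position of each sorted sample on the [0, 1] axis, following P_w = (C - w) / (S_w - w)
    adjusted = (cum_weights - weights) / (cum_weights[-1] - weights)
    idx = np.searchsorted(adjusted, percentile / 100, side="left")
    return values[min(idx, len(values) - 1)]

data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(weighted_percentile_1d(data, np.ones_like(data), 50))           # 3.0, the unweighted median
print(weighted_percentile_1d(data, np.array([1, 1, 1, 1, 10.0]), 50)) # 5.0, the heavy weight dominates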
35,058
def lower_ethosu(sch, args, const_dict, name="main"): """Lower a schedule to TIR for the Arm(R) Ethos(TM)-U NPU target. The resulting TIR module will contain a single function that comprises of a sequence of tir.extern_calls to NPU operations. Parameters ---------- sch : tvm.te.Schedule The schedule to be lowered. args : Union[list of tvm.te.Tensor, TEGraph] The input/output tensors. const_dict : dict of int to numpy.ndarray The constant dictionary. name : str, optional The name of the lowered primitive function. Returns ------- mod : tvm.IRModule The lowered TIR module. const_dict : dict of int to numpy.ndarray The modified constant dictionary. """ if not isinstance(args, list): args = list(args.inputs) + list(args.outputs) # config setup curr_pass_ctx = tvm.ir.transform.PassContext.current() curr_cfg = dict() for key, value in curr_pass_ctx.config.items(): curr_cfg[key] = value tir_compiler_cfg = { "tir.LoopPartition": { "partition_const_loop": True, "no_unroll_loop_with_extent_one": True, }, "tir.UnrollLoop": {"auto_max_depth": -1}, } # Merge two configs curr_cfg = {**curr_cfg, **tir_compiler_cfg} sch = sch.normalize() bounds = tvm.te.schedule.InferBound(sch) stmt = tvm.te.schedule.ScheduleOps(sch, bounds, True) compact = tvm.te.schedule.VerifyCompactBuffer(stmt) binds, arg_list = get_binds(args, compact, None) func = tvm.te.schedule.SchedulePostProcToPrimFunc(arg_list, stmt, binds) func = func.with_attr("global_symbol", name) func = func.with_attr("tir.noalias", True) mod = tvm.IRModule({name: func}) with tvm.transform.PassContext(config=curr_cfg): mod = tvm.tir.transform.Simplify()(mod) mod = tvm.tir.transform.StorageFlatten(64)(mod) mod = tvm.tir.transform.UnrollLoop()(mod) mod = tvm.tir.transform.LoopPartition()(mod) mod = RemoveZeroStores()(mod) mod = tvm.tir.transform.Simplify()(mod) mod = tvm.tir.transform.RemoveNoOp()(mod) mod = ReplaceOperators()(mod) mod = tvm.tir.transform.RemoveNoOp()(mod) mod, const_dict = EncodeConstants(const_dict)(mod) mod = tvm.tir.transform.StorageRewrite()(mod) mod = tvm.tir.transform.RemoveNoOp()(mod) return mod, const_dict
def lower_ethosu(sch, args, const_dict, name="main"): """Lower a schedule to TIR for the Arm(R) Ethos(TM)-U NPU target. The resulting TIR module will contain a single function that consists of a sequence of tir.extern_calls to NPU operations. Parameters ---------- sch : tvm.te.Schedule The schedule to be lowered. args : Union[list of tvm.te.Tensor, TEGraph] The input/output tensors. const_dict : dict of int to numpy.ndarray The constant dictionary. name : str, optional The name of the lowered primitive function. Returns ------- mod : tvm.IRModule The lowered TIR module. const_dict : dict of int to numpy.ndarray The modified constant dictionary. """ if not isinstance(args, list): args = list(args.inputs) + list(args.outputs) # config setup curr_pass_ctx = tvm.ir.transform.PassContext.current() curr_cfg = dict() for key, value in curr_pass_ctx.config.items(): curr_cfg[key] = value tir_compiler_cfg = { "tir.LoopPartition": { "partition_const_loop": True, "no_unroll_loop_with_extent_one": True, }, "tir.UnrollLoop": {"auto_max_depth": -1}, } # Merge two configs curr_cfg = {**curr_cfg, **tir_compiler_cfg} sch = sch.normalize() bounds = tvm.te.schedule.InferBound(sch) stmt = tvm.te.schedule.ScheduleOps(sch, bounds, True) compact = tvm.te.schedule.VerifyCompactBuffer(stmt) binds, arg_list = get_binds(args, compact, None) func = tvm.te.schedule.SchedulePostProcToPrimFunc(arg_list, stmt, binds) func = func.with_attr("global_symbol", name) func = func.with_attr("tir.noalias", True) mod = tvm.IRModule({name: func}) with tvm.transform.PassContext(config=curr_cfg): mod = tvm.tir.transform.Simplify()(mod) mod = tvm.tir.transform.StorageFlatten(64)(mod) mod = tvm.tir.transform.UnrollLoop()(mod) mod = tvm.tir.transform.LoopPartition()(mod) mod = RemoveZeroStores()(mod) mod = tvm.tir.transform.Simplify()(mod) mod = tvm.tir.transform.RemoveNoOp()(mod) mod = ReplaceOperators()(mod) mod = tvm.tir.transform.RemoveNoOp()(mod) mod, const_dict = EncodeConstants(const_dict)(mod) mod = tvm.tir.transform.StorageRewrite()(mod) mod = tvm.tir.transform.RemoveNoOp()(mod) return mod, const_dict
20,103
def syntax_err(s, token='', context='', msg=''): err = "syntax" if context: err += " in " err += context if msg: err += ": {}".format(msg) if isinstance(s, str): err += " parsing '{}'".format(s) elif token: err += " near <{}> parsing '{}'".format(token, ' '.join(s)) else: err += " parsing '{}'".format(' '.join(s)) logger.error(err)
def syntax_err(s, token='', context='', msg=''): err = "syntax" if context: err += (" in " + context) if msg: err += ": {}".format(msg) if isinstance(s, str): err += " parsing '{}'".format(s) elif token: err += " near <{}> parsing '{}'".format(token, ' '.join(s)) else: err += " parsing '{}'".format(' '.join(s)) logger.error(err)
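Note: a standalone run of the message-building paths; the logger setup and the example strings are arbitrary and only serve the demo.

# Demo of the string vs token-list branches of the helper above.
import logging

logging.basicConfig(level=logging.ERROR, format="%(levelname)s: %(message)s")
logger = logging.getLogger(__name__)

def syntax_err(s, token='', context='', msg=''):
    err = "syntax"
    if context:
        err += (" in " + context)
    if msg:
        err += ": {}".format(msg)
    if isinstance(s, str):
        err += " parsing '{}'".format(s)
    elif token:
        err += " near <{}> parsing '{}'".format(token, ' '.join(s))
    else:
        err += " parsing '{}'".format(' '.join(s))
    logger.error(err)

syntax_err("primitive p1 Dummy", context="configure", msg="unexpected token")
syntax_err(["primitive", "p1", "Dummy"], token="p1")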
54,198
def _est_indp_sp(x): """ Estimate the effective number of independent samples based on the maximum entropy rate principle of stationary random process. Parameters ---------- x : Returns ------- s : entrate_m : """ dimv = x.shape s0 = 0 for j in range(np.min(dimv) - 1): x_sb = _subsampling(x, j + 1, [0, 0, 0]) if j == 0: LGR.info( 'Estimating the entropy rate of the Gaussian component with subsampling depth {},' .format(j)) else: LGR.info(' {},'.format(j)) entrate_m = _entrate_sp(x_sb, 1) ent_ref = 1.41 if entrate_m > ent_ref: s0 = j break LGR.info(' Done;') if s0 == 0: raise ValueError( 'Ill conditioned data, can not estimate independent samples.(_est_indp_sp)' ) else: s = s0 return s, entrate_m
def _est_indp_sp(x): """ Estimate the effective number of independent samples based on the maximum entropy rate principle of stationary random process. Parameters ---------- x : Returns ------- s : entrate_m : """ dimv = x.shape s0 = 0 for j in range(np.min(dimv) - 1): x_sb = _subsampling(x, j + 1, [0, 0, 0]) if j == 0: LGR.info( 'Estimating the entropy rate of the Gaussian component with subsampling depth {},' .format(j)) else: LGR.info(' {},'.format(j)) entrate_m = _entrate_sp(x_sb, 1) ent_ref = 1.41 if entrate_m > ent_ref: s0 = j break if s0 == 0: raise ValueError( 'Ill conditioned data, can not estimate independent samples.(_est_indp_sp)' ) else: s = s0 return s, entrate_m
6,004
def _check_arg_size(function_name, num_cl_args, arg_types, devs): """Check whether argument sizes exceed the OpenCL device limit.""" for dev in devs: dev_ptr_size = int(dev.address_bits / 8) dev_limit = _get_max_parameter_size(dev) total_arg_size = 0 is_estimate = False if arg_types: for arg_type in arg_types: if arg_type is None: is_estimate = True total_arg_size += dev_ptr_size elif isinstance(arg_type, VectorArg): total_arg_size += dev_ptr_size else: total_arg_size += np.dtype(arg_type).itemsize else: # Estimate that each argument has the size of a pointer on average is_estimate = True total_arg_size = dev_ptr_size * num_cl_args if total_arg_size > dev_limit: from warnings import warn warn(f"Kernel '{function_name}' has {num_cl_args} arguments with " f"a total size of {total_arg_size} bytes, which is higher than " f"the limit of {dev_limit} bytes on {dev}. This might " "lead to compilation errors, especially on GPU devices.") elif is_estimate and total_arg_size >= dev_limit * 0.75: # Since total_arg_size is just an estimate, also warn in case we are # just below the actual limit. from warnings import warn warn(f"Kernel '{function_name}' has {num_cl_args} arguments with " f"a total size of {total_arg_size} bytes, which approaches " f"the limit of {dev_limit} bytes on {dev}. This might " "lead to compilation errors, especially on GPU devices.") elif num_cl_args > 50: logger.info( f"Kernel '{function_name}' has {num_cl_args} arguments with " f"a {'total estimated' if is_estimate else 'total'} size of " f"{total_arg_size} bytes.")
def _check_arg_size(function_name, num_cl_args, arg_types, devs): """Check whether argument sizes exceed the OpenCL device limit.""" for dev in devs: dev_ptr_size = int(dev.address_bits / 8) dev_limit = _get_max_parameter_size(dev) total_arg_size = 0 is_estimate = False if arg_types: for arg_type in arg_types: if arg_type is None: is_estimate = True total_arg_size += dev_ptr_size elif isinstance(arg_type, VectorArg): total_arg_size += dev_ptr_size else: total_arg_size += np.dtype(arg_type).itemsize else: # Estimate that each argument has the size of a pointer on average is_estimate = True total_arg_size = dev_ptr_size * num_cl_args if total_arg_size > dev_limit: from warnings import warn warn(f"Kernel '{function_name}' has {num_cl_args} arguments with " f"a total size of {total_arg_size} bytes, which is higher than " f"the limit of {dev_limit} bytes on {dev}. This might " "lead to compilation errors, especially on GPU devices.") elif is_estimate and total_arg_size >= dev_limit * 0.75: # Since total_arg_size is just an estimate, also warn in case we are # just below the actual limit. from warnings import warn warn(f"Kernel '{function_name}' has {num_cl_args} arguments with " f"a total size of {total_arg_size} bytes, which approaches " f"the limit of {dev_limit} bytes on {dev}. This might " "lead to compilation errors, especially on GPU devices.") elif num_cl_args > 50: logger.debug( f"Kernel '{function_name}' has {num_cl_args} arguments with " f"a {'total estimated' if is_estimate else 'total'} size of " f"{total_arg_size} bytes.")
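Note: a back-of-the-envelope version of the size estimate, kept free of any OpenCL calls: buffer arguments count as one device pointer each and scalars as their dtype's itemsize. The pointer size and parameter limit below are assumptions, not values queried from a real device.

# Rough kernel-argument size estimate with assumed device properties.
import numpy as np

DEV_PTR_SIZE = 8         # assumed 64-bit device pointers
DEV_PARAM_LIMIT = 1024   # assumed bytes of parameter space on the device

def estimate_arg_size(arg_dtypes):
    total = 0
    for dtype in arg_dtypes:
        if dtype is None:                     # treated as a buffer/pointer argument
            total += DEV_PTR_SIZE
        else:
            total += np.dtype(dtype).itemsize
    return total

args = [None, None, np.float64, np.int32]     # two buffers, one double, one int
size = estimate_arg_size(args)
print(f"{size} bytes of {DEV_PARAM_LIMIT} allowed "
      f"({size / DEV_PARAM_LIMIT:.1%} of the assumed device limit)")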
35,325
def convert_apdl_strings(apdl_strings, loglevel="WARNING", auto_exit=True, line_ending=None, exec_file=None, macros_as_functions=True, use_function_names=True, ): """Converts an ANSYS input string to a python PyMAPDL string. Parameters ---------- apdl_string : str APDL strings or list of strings to convert. filename_out : str Filename of the python script to write a translation to. loglevel : str, optional Logging level of the ansys object within the script. auto_exit : bool, optional Adds a line to the end of the script to exit MAPDL. Default ``True``. line_ending : str, optional When None, automatically determined by OS being used. macros_as_functions : bool, optional Attempt to convert MAPDL macros to python functions. use_function_names : bool, optional Convert MAPDL functions to ansys.mapdl.core.Mapdl class methods. When ``True``, the MAPDL command "K" will be converted to ``mapdl.k``. When ``False``, it will be converted to ``mapdl.run('k')``. Returns ------- list List of lines translated. """ translator = _convert(apdl_strings, loglevel=loglevel, auto_exit=auto_exit, line_ending=line_ending, exec_file=exec_file, macros_as_functions=macros_as_functions, use_function_names=use_function_names) if isinstance(apdl_strings, str): return translator.line_ending.join(translator.lines) else: return translator.lines
def convert_apdl_strings(apdl_strings, loglevel="WARNING", auto_exit=True, line_ending=None, exec_file=None, macros_as_functions=True, use_function_names=True, ): """Converts an ANSYS input string to a python PyMAPDL string. Parameters ---------- apdl_string : str APDL strings or list of strings to convert. filename_out : str Filename of the python script to write a translation to. loglevel : str, optional Logging level of the ansys object within the script. auto_exit : bool, optional Adds a line to the end of the script to exit MAPDL. Default ``True``. line_ending : str, optional When None, automatically determined by OS being used. macros_as_functions : bool, optional Attempt to convert MAPDL macros to python functions. use_function_names : bool, optional Convert MAPDL functions to ansys.mapdl.core.Mapdl class methods. When ``True``, the MAPDL command "K" will be converted to ``mapdl.k``. When ``False``, it will be converted to ``mapdl.run('k')``. Returns ------- list List of lines translated. """ translator = _convert(apdl_strings, loglevel=loglevel, auto_exit=auto_exit, line_ending=line_ending, exec_file=exec_file, macros_as_functions=macros_as_functions, use_function_names=use_function_names) if isinstance(apdl_strings, str): return translator.line_ending.join(translator.lines) else: return translator.lines
27,644
def ensure_db(engine, with_permissions=True): """ Initialise the db if needed. Ensures standard users exist. Create the schema if it doesn't exist. """ is_new = False c = engine.connect() quoted_db_name, quoted_user = _get_quoted_connection_info(c) if with_permissions: _LOG.info('Ensuring user roles.') _ensure_role(c, 'agdc_user') _ensure_role(c, 'agdc_ingest', inherits_from='agdc_user') _ensure_role(c, 'agdc_manage', inherits_from='agdc_ingest') _ensure_role(c, 'agdc_admin', inherits_from='agdc_manage', add_user=True) c.execute(""" grant all on database {db} to agdc_admin; """.format(db=quoted_db_name)) if not has_schema(engine, c): is_new = True try: c.execute('begin') if with_permissions: # Switch to 'agdc_admin', so that all items are owned by them. c.execute('set role agdc_admin') _LOG.info('Creating schema.') c.execute(CreateSchema(SCHEMA_NAME)) _LOG.info('Creating tables.') c.execute(TYPES_INIT_SQL) METADATA.create_all(c) _LOG.info("Creating triggers.") install_timestamp_trigger(c) _LOG.info("Creating added column") install_added_column(c) c.execute('commit') except: c.execute('rollback') raise finally: if with_permissions: c.execute('set role {}'.format(quoted_user)) if with_permissions: _LOG.info('Adding role grants.') c.execute(""" grant usage on schema {schema} to agdc_user; grant select on all tables in schema {schema} to agdc_user; grant execute on function {schema}.common_timestamp(text) to agdc_user; grant insert on {schema}.dataset, {schema}.dataset_location, {schema}.dataset_source to agdc_ingest; grant usage, select on all sequences in schema {schema} to agdc_ingest; -- (We're only granting deletion of types that have nothing written yet: they can't delete the data itself) grant insert, delete on {schema}.dataset_type, {schema}.metadata_type to agdc_manage; -- Allow creation of indexes, views grant create on schema {schema} to agdc_manage; """.format(schema=SCHEMA_NAME)) c.close() return is_new
def ensure_db(engine, with_permissions=True): """ Initialise the db if needed. Ensures standard users exist. Create the schema if it doesn't exist. """ is_new = False c = engine.connect() quoted_db_name, quoted_user = _get_quoted_connection_info(c) if with_permissions: _LOG.info('Ensuring user roles.') _ensure_role(c, 'agdc_user') _ensure_role(c, 'agdc_ingest', inherits_from='agdc_user') _ensure_role(c, 'agdc_manage', inherits_from='agdc_ingest') _ensure_role(c, 'agdc_admin', inherits_from='agdc_manage', add_user=True) c.execute(""" grant all on database {db} to agdc_admin; """.format(db=quoted_db_name)) if not has_schema(engine, c): is_new = True try: c.execute('begin') if with_permissions: # Switch to 'agdc_admin', so that all items are owned by them. c.execute('set role agdc_admin') _LOG.info('Creating schema.') c.execute(CreateSchema(SCHEMA_NAME)) _LOG.info('Creating tables.') c.execute(TYPES_INIT_SQL) METADATA.create_all(c) _LOG.info("Creating triggers.") install_timestamp_trigger(c) _LOG.info("Creating added column.") install_added_column(c) c.execute('commit') except: c.execute('rollback') raise finally: if with_permissions: c.execute('set role {}'.format(quoted_user)) if with_permissions: _LOG.info('Adding role grants.') c.execute(""" grant usage on schema {schema} to agdc_user; grant select on all tables in schema {schema} to agdc_user; grant execute on function {schema}.common_timestamp(text) to agdc_user; grant insert on {schema}.dataset, {schema}.dataset_location, {schema}.dataset_source to agdc_ingest; grant usage, select on all sequences in schema {schema} to agdc_ingest; -- (We're only granting deletion of types that have nothing written yet: they can't delete the data itself) grant insert, delete on {schema}.dataset_type, {schema}.metadata_type to agdc_manage; -- Allow creation of indexes, views grant create on schema {schema} to agdc_manage; """.format(schema=SCHEMA_NAME)) c.close() return is_new
40,991
def site_metas(request): """ Context processor to add all information required by Richie CMS templates and frontend. If `CDN_DOMAIN` settings is defined we add it in the context. It allows to load statics js on a CDN like cloudfront. """ site_current = Site.objects.get_current() protocol = "https" if request.is_secure() else "http" authentication_delegation = getattr(settings, "AUTHENTICATION_DELEGATION", False) context = { **{ f"GLIMPSE_PAGINATION_{k.upper()}": v for k, v in { **defaults.GLIMPSE_PAGINATION, **getattr(settings, "RICHIE_GLIMPSE_PAGINATION", {}), }.items() }, "SITE": { "name": site_current.name, "domain": site_current.domain, "web_url": f"{protocol:s}://{site_current.domain:s}", }, "FRONTEND_CONTEXT": { "context": { "csrftoken": get_token(request), "environment": getattr(settings, "ENVIRONMENT", ""), "release": getattr(settings, "RELEASE", ""), "sentry_dsn": getattr(settings, "SENTRY_DSN", ""), } }, } if getattr(settings, "CDN_DOMAIN", False): context["CDN_DOMAIN"] = settings.CDN_DOMAIN if getattr(settings, "AUTHENTICATION_DELEGATION", False): context.update( { "AUTHENTICATION": { "PROFILE_URLS": json.dumps( [ { "label": str(url["label"]), "action": str( url["href"].format( base_url=authentication_delegation["BASE_URL"] ) ), } for url in getattr( authentication_delegation, "PROFILE_URLS", [] ) ] ), } } ) context["FRONTEND_CONTEXT"]["context"].update( { "authentication": { "endpoint": authentication_delegation["BASE_URL"], "backend": authentication_delegation["BACKEND"], } } ) if getattr(settings, "LMS_BACKENDS", False): context["FRONTEND_CONTEXT"]["context"].update( { "lms_backends": [ { "endpoint": lms["BASE_URL"], "backend": lms["BACKEND"], "course_regexp": lms["JS_COURSE_REGEX"], "selector_regexp": lms["JS_SELECTOR_REGEX"], } for lms in getattr(settings, "LMS_BACKENDS", []) ] } ), context["FRONTEND_CONTEXT"] = json.dumps(context["FRONTEND_CONTEXT"]) return context
def site_metas(request): """ Context processor to add all information required by Richie CMS templates and frontend. If `CDN_DOMAIN` settings is defined we add it in the context. It allows to load statics js on a CDN like cloudfront. """ site_current = Site.objects.get_current() protocol = "https" if request.is_secure() else "http" authentication_delegation = getattr(settings, "AUTHENTICATION_DELEGATION", None) context = { **{ f"GLIMPSE_PAGINATION_{k.upper()}": v for k, v in { **defaults.GLIMPSE_PAGINATION, **getattr(settings, "RICHIE_GLIMPSE_PAGINATION", {}), }.items() }, "SITE": { "name": site_current.name, "domain": site_current.domain, "web_url": f"{protocol:s}://{site_current.domain:s}", }, "FRONTEND_CONTEXT": { "context": { "csrftoken": get_token(request), "environment": getattr(settings, "ENVIRONMENT", ""), "release": getattr(settings, "RELEASE", ""), "sentry_dsn": getattr(settings, "SENTRY_DSN", ""), } }, } if getattr(settings, "CDN_DOMAIN", False): context["CDN_DOMAIN"] = settings.CDN_DOMAIN if getattr(settings, "AUTHENTICATION_DELEGATION", False): context.update( { "AUTHENTICATION": { "PROFILE_URLS": json.dumps( [ { "label": str(url["label"]), "action": str( url["href"].format( base_url=authentication_delegation["BASE_URL"] ) ), } for url in getattr( authentication_delegation, "PROFILE_URLS", [] ) ] ), } } ) context["FRONTEND_CONTEXT"]["context"].update( { "authentication": { "endpoint": authentication_delegation["BASE_URL"], "backend": authentication_delegation["BACKEND"], } } ) if getattr(settings, "LMS_BACKENDS", False): context["FRONTEND_CONTEXT"]["context"].update( { "lms_backends": [ { "endpoint": lms["BASE_URL"], "backend": lms["BACKEND"], "course_regexp": lms["JS_COURSE_REGEX"], "selector_regexp": lms["JS_SELECTOR_REGEX"], } for lms in getattr(settings, "LMS_BACKENDS", []) ] } ), context["FRONTEND_CONTEXT"] = json.dumps(context["FRONTEND_CONTEXT"]) return context
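The processor above indexes AUTHENTICATION_DELEGATION["BASE_URL"] and ["BACKEND"] directly, formats each PROFILE_URLS entry's "href" with that base URL, and reads BASE_URL, BACKEND, JS_COURSE_REGEX and JS_SELECTOR_REGEX from every LMS_BACKENDS entry. A sketch of Django settings with that shape; every value below is a placeholder, not Richie's actual configuration.

# settings.py fragment -- placeholder values only
AUTHENTICATION_DELEGATION = {
    "BASE_URL": "https://lms.example.com",
    "BACKEND": "lms.backend.dotted.path",  # placeholder dotted path
    "PROFILE_URLS": [
        {"label": "Profile", "href": "{base_url:s}/u/profile"},
    ],
}

LMS_BACKENDS = [
    {
        "BASE_URL": "https://lms.example.com",
        "BACKEND": "lms.backend.dotted.path",  # placeholder dotted path
        "JS_COURSE_REGEX": r".*",   # placeholder frontend regexes
        "JS_SELECTOR_REGEX": r".*",
    },
]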
59,265
def _determine_linux_fastcopy_blocksize(infd): """Determine blocksize for fastcopying on Linux. Hopefully the whole file will be copied in a single call. The copying itself should be performed in a loop 'till EOF is reached (0 return) so a blocksize smaller or bigger than the actual file size should not make any difference, also in case the file content changes while being copied. """ try: blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8MiB except OSError: blocksize = 2 ** 27 # 128MiB # On 32-bit architectures truncate to 1GiB to avoid OverflowError, # see bpo-38319. if sys.maxsize < 2 ** 32: blocksize = min(blocksize, 2 ** 30) return blocksize
def _determine_linux_fastcopy_blocksize(infd): """Determine blocksize for fastcopying on Linux. Hopefully the whole file will be copied in a single call. The copying itself should be performed in a loop 'till EOF is reached (0 return) so a blocksize smaller or bigger than the actual file size should not make any difference, also in case the file content changes while being copied. """ try: blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8 MiB except OSError: blocksize = 2 ** 27 # 128 MiB # On 32-bit architectures truncate to 1 GiB to avoid OverflowError, # see bpo-38319. if sys.maxsize < 2 ** 32: blocksize = min(blocksize, 2 ** 30) return blocksize
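The docstring above assumes the returned blocksize feeds a loop that keeps issuing kernel-side copies until a zero return signals EOF. A minimal sketch of such a loop using os.copy_file_range (Linux only, Python 3.8+), assuming it lives in the same module as the helper above; _fastcopy_linux and the file paths are hypothetical, and error handling is reduced to the essentials.

import os

def _fastcopy_linux(src_path, dst_path):
    # Hypothetical wrapper: copy src to dst entirely in kernel space.
    with open(src_path, "rb") as fsrc, open(dst_path, "wb") as fdst:
        infd, outfd = fsrc.fileno(), fdst.fileno()
        blocksize = _determine_linux_fastcopy_blocksize(infd)
        while True:
            copied = os.copy_file_range(infd, outfd, blocksize)
            if copied == 0:  # EOF reached
                break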
33,590
def test_get_webui(): addresses = ray.init(include_webui=True) webui_url = addresses['webui_url'] assert ray.worker.get_webui_url() == webui_url ray.shutdown()
def test_get_webui(): addresses = ray.init(include_webui=True) webui_url = addresses["webui_url"] assert ray.worker.get_webui_url() == webui_url ray.shutdown()
7,249
def _clahe(image, kernel_size, clip_limit, nbins): """Contrast Limited Adaptive Histogram Equalization. Parameters ---------- image : (N1,...,NN) ndarray Input image. kernel_size: int or N-tuple of int Defines the shape of contextual regions used in the algorithm. clip_limit : float Normalized clipping limit between 0 and 1 (higher values give more contrast). nbins : int Number of gray bins for histogram ("data range"). Returns ------- out : (N1,...,NN) ndarray Equalized image. The number of "effective" graylevels in the output image is set by `nbins`; selecting a small value (eg. 128) speeds up processing and still produce an output image of good quality. A clip limit of 0 or larger equal 1 results in standard (non-contrast limited) AHE. """ ndim = image.ndim dtype = image.dtype # pad the image such that the shape in each dimension # - is a multiple of the kernel_size and # - is preceded by half a kernel size pad_start_per_dim = [k // 2 for k in kernel_size] pad_end_per_dim = [(k - s % k) % k + int(np.ceil(k / 2.)) for k, s in zip(kernel_size, image.shape)] image = np.pad(image, [[p_i, p_f] for p_i, p_f in zip(pad_start_per_dim, pad_end_per_dim)], mode='reflect') # determine gray value bins bin_size = 1 + NR_OF_GRAY // nbins lut = np.arange(NR_OF_GRAY) lut //= bin_size image = lut[image] # calculate graylevel mappings for each contextual region # rearrange image into flattened contextual regions ns_hist = [int(s / k) - 1 for s, k in zip(image.shape, kernel_size)] hist_blocks_shape = np.array([ns_hist, kernel_size]).T.flatten() hist_blocks_axis_order = np.array([np.arange(0, ndim * 2, 2), np.arange(1, ndim * 2, 2)]).flatten() hist_slices = [slice(k // 2, k // 2 + n * k) for k, n in zip(kernel_size, ns_hist)] hist_blocks = image[tuple(hist_slices)].reshape(hist_blocks_shape) hist_blocks = np.transpose(hist_blocks, axes=hist_blocks_axis_order) hist_block_assembled_shape = hist_blocks.shape hist_blocks = hist_blocks.reshape((np.product(ns_hist), -1)) # Calculate actual clip limit if clip_limit > 0.0: clim = int(np.clip(clip_limit * np.product(kernel_size), 1, None)) else: # largest possible value, i.e., do not clip (AHE) clim = np.product(kernel_size) hist = np.apply_along_axis(np.bincount, -1, hist_blocks, minlength=nbins) hist = np.apply_along_axis(clip_histogram, -1, hist, clip_limit=clim) hist = map_histogram(hist, 0, NR_OF_GRAY - 1, np.product(kernel_size)) hist = hist.reshape(hist_block_assembled_shape[:ndim] + (-1,)) # duplicate leading mappings in each dim map_array = np.pad(hist, [[1, 1] for _ in range(ndim)] + [[0, 0]], mode='edge') # Perform multilinear interpolation of graylevel mappings # using the convention described here: # https://en.wikipedia.org/w/index.php?title=Adaptive_histogram_ # equalization&oldid=936814673#Efficient_computation_by_interpolation # rearrange image into blocks for vectorized processing ns_proc = [int(s / k) for s, k in zip(image.shape, kernel_size)] blocks_shape = np.array([ns_proc, kernel_size]).T.flatten() blocks_axis_order = np.array([np.arange(0, ndim * 2, 2), np.arange(1, ndim * 2, 2)]).flatten() blocks = image.reshape(blocks_shape) blocks = np.transpose(blocks, axes=blocks_axis_order) blocks_flattened_shape = blocks.shape blocks = np.reshape(blocks, (np.product(ns_proc), np.product(blocks.shape[ndim:]))) # calculate interpolation coefficients coeffs = np.meshgrid(*tuple([np.arange(k) / k for k in kernel_size[::-1]]), indexing='ij') coeffs = [np.transpose(c).flatten() for c in coeffs] inv_coeffs = [1 - c for dim, c in enumerate(coeffs)] # sum over 
contributions of neighboring contextual # regions in each direction result = np.zeros(blocks.shape, dtype=np.float32) for iedge, edge in enumerate(np.ndindex(*([2] * ndim))): edge_maps = map_array[tuple([slice(e, e + n) for e, n in zip(edge, ns_proc)])] edge_maps = edge_maps.reshape((np.product(ns_proc), -1)) # apply map edge_mapped = np.take_along_axis(edge_maps, blocks, axis=-1) # interpolate edge_coeffs = np.product([[inv_coeffs, coeffs][e][d] for d, e in enumerate(edge[::-1])], 0) result += (edge_mapped * edge_coeffs).astype(result.dtype) result = result.astype(dtype) # rebuild result image from blocks result = result.reshape(blocks_flattened_shape) blocks_axis_rebuild_order =\ np.array([np.arange(0, ndim), np.arange(ndim, ndim * 2)]).T.flatten() result = np.transpose(result, axes=blocks_axis_rebuild_order) result = result.reshape(image.shape) # undo padding unpad_slices = tuple([slice(p_i, s - p_f) for p_i, p_f, s in zip(pad_start_per_dim, pad_end_per_dim, image.shape)]) result = result[unpad_slices] return result
def _clahe(image, kernel_size, clip_limit, nbins): """Contrast Limited Adaptive Histogram Equalization. Parameters ---------- image : (N1,...,NN) ndarray Input image. kernel_size: int or N-tuple of int Defines the shape of contextual regions used in the algorithm. clip_limit : float Normalized clipping limit between 0 and 1 (higher values give more contrast). nbins : int Number of gray bins for histogram ("data range"). Returns ------- out : (N1,...,NN) ndarray Equalized image. The number of "effective" graylevels in the output image is set by `nbins`; selecting a small value (e.g. 128) speeds up processing and still produces an output image of good quality. A clip limit of 0 or larger equal 1 results in standard (non-contrast limited) AHE. """ ndim = image.ndim dtype = image.dtype # pad the image such that the shape in each dimension # - is a multiple of the kernel_size and # - is preceded by half a kernel size pad_start_per_dim = [k // 2 for k in kernel_size] pad_end_per_dim = [(k - s % k) % k + int(np.ceil(k / 2.)) for k, s in zip(kernel_size, image.shape)] image = np.pad(image, [[p_i, p_f] for p_i, p_f in zip(pad_start_per_dim, pad_end_per_dim)], mode='reflect') # determine gray value bins bin_size = 1 + NR_OF_GRAY // nbins lut = np.arange(NR_OF_GRAY) lut //= bin_size image = lut[image] # calculate graylevel mappings for each contextual region # rearrange image into flattened contextual regions ns_hist = [int(s / k) - 1 for s, k in zip(image.shape, kernel_size)] hist_blocks_shape = np.array([ns_hist, kernel_size]).T.flatten() hist_blocks_axis_order = np.array([np.arange(0, ndim * 2, 2), np.arange(1, ndim * 2, 2)]).flatten() hist_slices = [slice(k // 2, k // 2 + n * k) for k, n in zip(kernel_size, ns_hist)] hist_blocks = image[tuple(hist_slices)].reshape(hist_blocks_shape) hist_blocks = np.transpose(hist_blocks, axes=hist_blocks_axis_order) hist_block_assembled_shape = hist_blocks.shape hist_blocks = hist_blocks.reshape((np.product(ns_hist), -1)) # Calculate actual clip limit if clip_limit > 0.0: clim = int(np.clip(clip_limit * np.product(kernel_size), 1, None)) else: # largest possible value, i.e., do not clip (AHE) clim = np.product(kernel_size) hist = np.apply_along_axis(np.bincount, -1, hist_blocks, minlength=nbins) hist = np.apply_along_axis(clip_histogram, -1, hist, clip_limit=clim) hist = map_histogram(hist, 0, NR_OF_GRAY - 1, np.product(kernel_size)) hist = hist.reshape(hist_block_assembled_shape[:ndim] + (-1,)) # duplicate leading mappings in each dim map_array = np.pad(hist, [[1, 1] for _ in range(ndim)] + [[0, 0]], mode='edge') # Perform multilinear interpolation of graylevel mappings # using the convention described here: # https://en.wikipedia.org/w/index.php?title=Adaptive_histogram_ # equalization&oldid=936814673#Efficient_computation_by_interpolation # rearrange image into blocks for vectorized processing ns_proc = [int(s / k) for s, k in zip(image.shape, kernel_size)] blocks_shape = np.array([ns_proc, kernel_size]).T.flatten() blocks_axis_order = np.array([np.arange(0, ndim * 2, 2), np.arange(1, ndim * 2, 2)]).flatten() blocks = image.reshape(blocks_shape) blocks = np.transpose(blocks, axes=blocks_axis_order) blocks_flattened_shape = blocks.shape blocks = np.reshape(blocks, (np.product(ns_proc), np.product(blocks.shape[ndim:]))) # calculate interpolation coefficients coeffs = np.meshgrid(*tuple([np.arange(k) / k for k in kernel_size[::-1]]), indexing='ij') coeffs = [np.transpose(c).flatten() for c in coeffs] inv_coeffs = [1 - c for dim, c in enumerate(coeffs)] # sum 
over contributions of neighboring contextual # regions in each direction result = np.zeros(blocks.shape, dtype=np.float32) for iedge, edge in enumerate(np.ndindex(*([2] * ndim))): edge_maps = map_array[tuple([slice(e, e + n) for e, n in zip(edge, ns_proc)])] edge_maps = edge_maps.reshape((np.product(ns_proc), -1)) # apply map edge_mapped = np.take_along_axis(edge_maps, blocks, axis=-1) # interpolate edge_coeffs = np.product([[inv_coeffs, coeffs][e][d] for d, e in enumerate(edge[::-1])], 0) result += (edge_mapped * edge_coeffs).astype(result.dtype) result = result.astype(dtype) # rebuild result image from blocks result = result.reshape(blocks_flattened_shape) blocks_axis_rebuild_order =\ np.array([np.arange(0, ndim), np.arange(ndim, ndim * 2)]).T.flatten() result = np.transpose(result, axes=blocks_axis_rebuild_order) result = result.reshape(image.shape) # undo padding unpad_slices = tuple([slice(p_i, s - p_f) for p_i, p_f, s in zip(pad_start_per_dim, pad_end_per_dim, image.shape)]) result = result[unpad_slices] return result
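_clahe is an internal helper; in scikit-image the public entry point is exposure.equalize_adapthist, which rescales the input and fills in kernel_size defaults before handing off to a routine like the one above. A minimal usage sketch on a bundled sample image; the parameter values are illustrative only.

from skimage import data, exposure

image = data.moon()  # uint8 grayscale sample image
# kernel_size defaults to roughly 1/8 of each image dimension when omitted.
equalized = exposure.equalize_adapthist(image, clip_limit=0.02, nbins=256)
print(equalized.shape, equalized.min(), equalized.max())  # float image in [0, 1]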
43,585
def QAOAEmbedding(features, weights, wires, local_field='Y'): r""" Encodes :math:`N` features into :math:`n` qubits, using a layered, trainable quantum circuit that is inspired by the QAOA ansatz. A single layer applies two circuits or "Hamiltonians": The first encodes the features, and the second is a variational ansatz inspired by a 1-dimensional Ising model. The feature-encoding circuit associates features with the angles of :class:`RX` rotations. The Ising ansatz consists of trainable two-qubit ZZ interactions :math:`e^{-i \alpha \sigma_z \otimes \sigma_z}`, and trainable local fields :math:`e^{-i \frac{\beta}{2} \sigma_{\mu}}`, where :math:`\sigma_{\mu}` can be chosen to be :math:`\sigma_{x}`, :math:`\sigma_{y}` or :math:`\sigma_{z}` (default choice is :math:`\sigma_{y}` or the ``RY`` gate). :math:`\alpha, \beta` are adjustable gate parameters. The number of features has to be smaller or equal to the number of qubits. If there are fewer features than qubits, the feature-encoding rotation is replaced by a Hadamard gate. This is an example for the full embedding circuit using 2 layers, 3 features, 4 wires, and ``RY`` local fields: | .. figure:: ../../_static/qaoa_layers.png :align: center :width: 60% :target: javascript:void(0); | The argument ``weights`` contains an array of the :math:`\alpha, \beta` parameters for each layer. The number of layers :math:`L` is derived from the first dimension of ``weights``. If the embedding acts on a single wire, ``weights`` has shape :math:`(L, )`, if the embedding acts on two wires, it has shape :math:`(L, 3)`, and else it has shape :math:`(L, 2n)` After the :math:`L`th layer, another set of feature encoding :class:`RX` gates is applied. .. note:: ``QAOAEmbedding`` supports gradient computations with respect to both the ``features`` and the ``weights`` arguments. Note that trainable parameters need to be passed to the quantum node as positional arguments. Args: features (array): array of features to encode weights (array): array of weights wires (Sequence[int] or int): `n` qubit indices that the template acts on local_field (str): type of local field used, one of ``'X'``, ``'Y'``, or ``'Z'`` Raises: ValueError: if inputs do not have the correct format .. UsageDetails:: The QAOA embedding encodes an :math:`n`-dimensional feature vector into at most :math:`n` qubits. The embedding applies layers of a circuit, and each layer is defined by a set of weight parameters. .. code-block:: python import pennylane as qml from pennylane.templates import QAOAEmbedding dev = qml.device('default.qubit', wires=2) @qml.qnode(dev) def circuit(weights, f=None): QAOAEmbedding(features=f, weights=weights, wires=range(2)) return qml.expval(qml.PauliZ(0)) features = [1., 2.] layer1 = [0.1, -0.3, 1.5] layer2 = [3.1, 0.2, -2.8] weights = [layer1, layer2] print(circuit(weights, f=features)) **Using parameter initialization functions** The initial weight parameters can alternatively be generated by utility functions from the ``pennylane.init`` module, for example using the function :func:`qaoa_embedding_normal`: .. code-block:: python from pennylane.init import qaoa_embedding_normal weights = qaoa_embedding_normal(n_layers=2, n_wires=2, mean=0, std=0.2) **Training the embedding** The embedding is typically trained with respect to a given cost. For example, one can train it to minimize the PauliZ expectation of the first qubit: .. 
code-block:: python o = GradientDescentOptimizer() for i in range(10): weights = o.step(lambda w : circuit(w, f=features), weights) print("Step ", i, " weights = ", weights) **Training the features** In principle, also the features are trainable, which means that gradients with respect to feature values can be computed. To train both weights and features, they need to be passed to the qnode as positional arguments. If the built-in optimizer is used, they have to be merged to one input: .. code-block:: python @qml.qnode(dev) def circuit2(pars): weights = pars[0] features = pars[1] QAOAEmbedding(features=features, weights=weights, wires=range(2)) return qml.expval(qml.PauliZ(0)) features = [1., 2.] weights = [[0.1, -0.3, 1.5], [3.1, 0.2, -2.8]] pars = [weights, features] o = GradientDescentOptimizer() for i in range(10): pars = o.step(circuit2, pars) print("Step ", i, " weights = ", pars[0], " features = ", pars[1]) **Local Fields** While by default, ``RY`` gates are used as local fields, one may also choose ``local_field='Z'`` or ``local_field='X'`` as hyperparameters of the embedding. .. code-block:: python @qml.qnode(dev) def circuit(weights, f=None): QAOAEmbedding(features=f, weights=weights, wires=range(2), local_field='Z') return qml.expval(qml.PauliZ(0)) Choosing ``'Z'`` fields implements a QAOAEmbedding where the second Hamiltonian is a 1-dimensional Ising model. """ ############# # Input checks wires, n_wires = _check_wires(wires) n_features = _get_shape(features)[0] msg = "QAOAEmbedding cannot process more features than number of qubits {};" \ "got {}.".format(n_wires, len(features)) _check_shape(features, (n_wires,), bound='max', msg=msg) msg = "Option for local field not known. Has to be one of ``'X'``, ``'Y'``, or ``'Z'``." _check_hyperp_is_in_options(local_field, ['X', 'Y', 'Z'], msg=msg) if local_field == 'Z': local_field = RZ elif local_field == 'X': local_field = RX else: local_field = RY repeat = _check_number_of_layers([weights]) weights = np.array(weights) weights_shape = weights.shape if n_wires == 1: msg = "QAOAEmbedding with 1 qubit and {} layers requires weight " \ "array of shape {}; got {}".format(repeat, (repeat, 1), weights_shape) _check_shape(weights, (repeat, 1), msg=msg) elif n_wires == 2: msg = "QAOAEmbedding with 2 qubits and {} layers requires weight " \ "array of shape {}; got {}".format(repeat, (repeat, 3), weights_shape) _check_shape(weights, (repeat, 3), msg=msg) else: msg = "QAOAEmbedding with {} qubits and {} layers requires weight " \ "array of shape {}; got {}".format(n_wires, repeat, (repeat, 2*n_wires), weights_shape) _check_shape(weights, (repeat, 2*n_wires), msg=msg) ##################### for l in range(repeat): # encode inputs into RX gates for i in range(n_wires): # Either feed in feature if i < n_features: RX(features[i], wires=wires[i]) # or a Hadamard else: Hadamard(wires=wires[i]) # trainable "Ising" ansatz if n_wires == 1: local_field(weights[l, 0], wires=wires[0]) elif n_wires == 2: CNOT(wires=[wires[0], wires[1]]) RZ(2 * weights[l, 0], wires=wires[0]) CNOT(wires=[wires[0], wires[1]]) # local fields for i in range(n_wires): local_field(weights[l, i+1], wires=wires[i]) else: for i in range(n_wires): if i < n_wires - 1: CNOT(wires=[wires[i], wires[i + 1]]) RZ(2 * weights[l, i], wires=wires[i]) CNOT(wires=[wires[i], wires[i + 1]]) else: # enforce periodic boundary condition CNOT(wires=[wires[i], wires[0]]) RZ(2 * weights[l, i], wires=wires[i]) CNOT(wires=[wires[i], wires[0]]) # local fields for i in range(n_wires): local_field(weights[l, 
n_wires + i], wires=wires[i]) # repeat feature encoding once more at the end for i in range(n_wires): # Either feed in feature if i < n_features: RX(features[i], wires=wires[i]) # or a Hadamard else: Hadamard(wires=wires[i])
def QAOAEmbedding(features, weights, wires, local_field='Y'): r""" Encodes :math:`N` features into :math:`n` qubits, using a layered, trainable quantum circuit that is inspired by the QAOA ansatz. A single layer applies two circuits or "Hamiltonians": The first encodes the features, and the second is a variational ansatz inspired by a 1-dimensional Ising model. The feature-encoding circuit associates features with the angles of :class:`RX` rotations. The Ising ansatz consists of trainable two-qubit ZZ interactions :math:`e^{-i \alpha \sigma_z \otimes \sigma_z}`, and trainable local fields :math:`e^{-i \frac{\beta}{2} \sigma_{\mu}}`, where :math:`\sigma_{\mu}` can be chosen to be :math:`\sigma_{x}`, :math:`\sigma_{y}` or :math:`\sigma_{z}` (default choice is :math:`\sigma_{y}` or the ``RY`` gate). :math:`\alpha, \beta` are adjustable gate parameters. The number of features has to be smaller or equal to the number of qubits. If there are fewer features than qubits, the feature-encoding rotation is replaced by a Hadamard gate. This is an example for the full embedding circuit using 2 layers, 3 features, 4 wires, and ``RY`` local fields: | .. figure:: ../../_static/qaoa_layers.png :align: center :width: 60% :target: javascript:void(0); | The argument ``weights`` contains an array of the :math:`\alpha, \beta` parameters for each layer. The number of layers :math:`L` is derived from the first dimension of ``weights``. If the embedding acts on a single wire, ``weights`` has shape :math:`(L, )`, if the embedding acts on two wires, it has shape :math:`(L, 3)`, and else it has shape :math:`(L, 2n)` After the :math:`L`th layer, another set of feature encoding :class:`RX` gates is applied. .. note:: ``QAOAEmbedding`` supports gradient computations with respect to both the ``features`` and the ``weights`` arguments. Note that trainable parameters need to be passed to the quantum node as positional arguments. Args: features (array): array of features to encode weights (array): array of weights wires (Sequence[int] or int): `n` qubit indices that the template acts on local_field (str): type of local field used, one of ``'X'``, ``'Y'``, or ``'Z'`` Raises: ValueError: if inputs do not have the correct format .. UsageDetails:: The QAOA embedding encodes an :math:`n`-dimensional feature vector into at most :math:`n` qubits. The embedding applies layers of a circuit, and each layer is defined by a set of weight parameters. .. code-block:: python import pennylane as qml from pennylane.templates import QAOAEmbedding dev = qml.device('default.qubit', wires=2) @qml.qnode(dev) def circuit(weights, f=None): QAOAEmbedding(features=f, weights=weights, wires=range(2)) return qml.expval(qml.PauliZ(0)) features = [1., 2.] layer1 = [0.1, -0.3, 1.5] layer2 = [3.1, 0.2, -2.8] weights = [layer1, layer2] print(circuit(weights, f=features)) **Using parameter initialization functions** The initial weight parameters can alternatively be generated by utility functions from the ``pennylane.init`` module, for example using the function :func:`~.qaoa_embedding_normal`: .. code-block:: python from pennylane.init import qaoa_embedding_normal weights = qaoa_embedding_normal(n_layers=2, n_wires=2, mean=0, std=0.2) **Training the embedding** The embedding is typically trained with respect to a given cost. For example, one can train it to minimize the PauliZ expectation of the first qubit: .. 
code-block:: python o = GradientDescentOptimizer() for i in range(10): weights = o.step(lambda w : circuit(w, f=features), weights) print("Step ", i, " weights = ", weights) **Training the features** In principle, also the features are trainable, which means that gradients with respect to feature values can be computed. To train both weights and features, they need to be passed to the qnode as positional arguments. If the built-in optimizer is used, they have to be merged to one input: .. code-block:: python @qml.qnode(dev) def circuit2(pars): weights = pars[0] features = pars[1] QAOAEmbedding(features=features, weights=weights, wires=range(2)) return qml.expval(qml.PauliZ(0)) features = [1., 2.] weights = [[0.1, -0.3, 1.5], [3.1, 0.2, -2.8]] pars = [weights, features] o = GradientDescentOptimizer() for i in range(10): pars = o.step(circuit2, pars) print("Step ", i, " weights = ", pars[0], " features = ", pars[1]) **Local Fields** While by default, ``RY`` gates are used as local fields, one may also choose ``local_field='Z'`` or ``local_field='X'`` as hyperparameters of the embedding. .. code-block:: python @qml.qnode(dev) def circuit(weights, f=None): QAOAEmbedding(features=f, weights=weights, wires=range(2), local_field='Z') return qml.expval(qml.PauliZ(0)) Choosing ``'Z'`` fields implements a QAOAEmbedding where the second Hamiltonian is a 1-dimensional Ising model. """ ############# # Input checks wires, n_wires = _check_wires(wires) n_features = _get_shape(features)[0] msg = "QAOAEmbedding cannot process more features than number of qubits {};" \ "got {}.".format(n_wires, len(features)) _check_shape(features, (n_wires,), bound='max', msg=msg) msg = "Option for local field not known. Has to be one of ``'X'``, ``'Y'``, or ``'Z'``." _check_hyperp_is_in_options(local_field, ['X', 'Y', 'Z'], msg=msg) if local_field == 'Z': local_field = RZ elif local_field == 'X': local_field = RX else: local_field = RY repeat = _check_number_of_layers([weights]) weights = np.array(weights) weights_shape = weights.shape if n_wires == 1: msg = "QAOAEmbedding with 1 qubit and {} layers requires weight " \ "array of shape {}; got {}".format(repeat, (repeat, 1), weights_shape) _check_shape(weights, (repeat, 1), msg=msg) elif n_wires == 2: msg = "QAOAEmbedding with 2 qubits and {} layers requires weight " \ "array of shape {}; got {}".format(repeat, (repeat, 3), weights_shape) _check_shape(weights, (repeat, 3), msg=msg) else: msg = "QAOAEmbedding with {} qubits and {} layers requires weight " \ "array of shape {}; got {}".format(n_wires, repeat, (repeat, 2*n_wires), weights_shape) _check_shape(weights, (repeat, 2*n_wires), msg=msg) ##################### for l in range(repeat): # encode inputs into RX gates for i in range(n_wires): # Either feed in feature if i < n_features: RX(features[i], wires=wires[i]) # or a Hadamard else: Hadamard(wires=wires[i]) # trainable "Ising" ansatz if n_wires == 1: local_field(weights[l, 0], wires=wires[0]) elif n_wires == 2: CNOT(wires=[wires[0], wires[1]]) RZ(2 * weights[l, 0], wires=wires[0]) CNOT(wires=[wires[0], wires[1]]) # local fields for i in range(n_wires): local_field(weights[l, i+1], wires=wires[i]) else: for i in range(n_wires): if i < n_wires - 1: CNOT(wires=[wires[i], wires[i + 1]]) RZ(2 * weights[l, i], wires=wires[i]) CNOT(wires=[wires[i], wires[i + 1]]) else: # enforce periodic boundary condition CNOT(wires=[wires[i], wires[0]]) RZ(2 * weights[l, i], wires=wires[i]) CNOT(wires=[wires[i], wires[0]]) # local fields for i in range(n_wires): local_field(weights[l, 
n_wires + i], wires=wires[i]) # repeat feature encoding once more at the end for i in range(n_wires): # Either feed in feature if i < n_features: RX(features[i], wires=wires[i]) # or a Hadamard else: Hadamard(wires=wires[i])
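The input checks above require weights of shape (L, 1) for a single wire, (L, 3) for two wires and (L, 2n) otherwise. A small hypothetical helper that restates that rule, handy when initializing weights by hand; qaoa_weights_shape is not part of PennyLane.

import numpy as np

def qaoa_weights_shape(n_layers, n_wires):
    # Mirrors the shape checks performed by QAOAEmbedding above.
    if n_wires == 1:
        return (n_layers, 1)
    if n_wires == 2:
        return (n_layers, 3)
    return (n_layers, 2 * n_wires)

# e.g. random initial weights for 2 layers on 4 wires -> shape (2, 8)
weights = np.random.normal(0, 0.1, size=qaoa_weights_shape(2, 4))
print(weights.shape)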
10,487
def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False, fixed=False, autoremove=False, no_remove=False, only_upgrade=False, allow_unauthenticated=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) package_names = [] for package in pkgspec: if build_dep: # Let apt decide what to install pkg_list.append("'%s'" % package) continue name, version = package_split(package) package_names.append(name) installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install') if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed_version and upgradable and version: # This happens when the package is installed, a newer version is # available, and the version is a wildcard that matches both # # We do not apply the upgrade flag because we cannot specify both # a version and state=latest. (This behaviour mirrors how apt # treats a version with wildcard in the package) pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if packages: if force: force_yes = '--force-yes' else: force_yes = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if no_remove: no_remove = '--no-remove' else: no_remove = '' if only_upgrade: only_upgrade = '--only-upgrade' else: only_upgrade = '' if fixed: fixed = '--fix-broken' else: fixed = '' if build_dep: cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, no_remove, check_arg, packages) else: cmd = "%s -y %s %s %s %s %s %s %s install %s" % \ (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, no_remove, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) if install_recommends is False: cmd += " -o APT::Install-Recommends=no" elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" # install_recommends is None uses the OS default if allow_unauthenticated: cmd += " --allow-unauthenticated" with PolicyRcD(m): rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} status = True changed = True if build_dep: changed = APT_GET_ZERO not in out data = dict(changed=changed, stdout=out, stderr=err, diff=diff) if rc: status = False data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc) else: status = True data = dict(changed=False) if not build_dep: mark_installed_manually(m, package_names) return (status, data)
def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False, fixed=False, autoremove=False, fail_on_autoremove=False, only_upgrade=False, allow_unauthenticated=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) package_names = [] for package in pkgspec: if build_dep: # Let apt decide what to install pkg_list.append("'%s'" % package) continue name, version = package_split(package) package_names.append(name) installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install') if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed_version and upgradable and version: # This happens when the package is installed, a newer version is # available, and the version is a wildcard that matches both # # We do not apply the upgrade flag because we cannot specify both # a version and state=latest. (This behaviour mirrors how apt # treats a version with wildcard in the package) pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if packages: if force: force_yes = '--force-yes' else: force_yes = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if fail_on_autoremove: fail_on_autoremove = '--no-remove' else: fail_on_autoremove = '' if only_upgrade: only_upgrade = '--only-upgrade' else: only_upgrade = '' if fixed: fixed = '--fix-broken' else: fixed = '' if build_dep: cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, fail_on_autoremove, check_arg, packages) else: cmd = "%s -y %s %s %s %s %s %s %s install %s" % \ (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, fail_on_autoremove, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) if install_recommends is False: cmd += " -o APT::Install-Recommends=no" elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" # install_recommends is None uses the OS default if allow_unauthenticated: cmd += " --allow-unauthenticated" with PolicyRcD(m): rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} status = True changed = True if build_dep: changed = APT_GET_ZERO not in out data = dict(changed=changed, stdout=out, stderr=err, diff=diff) if rc: status = False data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc) else: status = True data = dict(changed=False) if not build_dep: mark_installed_manually(m, package_names) return (status, data)
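Most of the boolean options above collapse into either an apt-get flag or an empty string before being interpolated into one command line. A standalone sketch of that pattern for a reduced set of options, with a made-up package list; it only illustrates the string assembly and is not the module's actual code path.

def build_install_cmd(packages, check_mode=False, autoremove=False, fail_on_autoremove=False):
    # Each boolean option becomes its apt-get flag or an empty string;
    # the extra spaces left by empty flags are harmless to the shell.
    check_arg = '--simulate' if check_mode else ''
    autoremove_arg = '--auto-remove' if autoremove else ''
    fail_arg = '--no-remove' if fail_on_autoremove else ''
    return "apt-get -y %s %s %s install %s" % (autoremove_arg, fail_arg, check_arg, packages)

print(build_install_cmd("'htop' 'tmux'", check_mode=True))
# -> apt-get -y   --simulate install 'htop' 'tmux'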
34,873
def keras_op_to_relay(inexpr, keras_layer, outname, etab): """Convert keras layer to relay expr, and update etab. Parameters ---------- inexpr : relay.expr.Expr or a list of it The input relay expr(s) keras_layer : keras.layers The keras layer to be converted outname : str Name of the output relay expr etab : relay.frontend.common.ExprTable The global expr table to be updated """ if type(keras_layer).__name__ not in _convert_map: raise NotImplementedError("{} is not supported".format((type(keras_layer).__name__))) outs = _convert_map[type(keras_layer).__name__](inexpr, keras_layer, etab) outs = _as_list(outs) for t_idx, out in enumerate(outs): name = outname + ":" + str(t_idx) etab.set_expr(name, out)
def keras_op_to_relay(inexpr, keras_layer, outname, etab): """Convert keras layer to relay expr, and update etab. Parameters ---------- inexpr : relay.expr.Expr or a list of it The input relay expr(s) keras_layer : keras.layers The keras layer to be converted outname : str Name of the output Relay expression. etab : relay.frontend.common.ExprTable The global expr table to be updated """ if type(keras_layer).__name__ not in _convert_map: raise NotImplementedError("{} is not supported".format((type(keras_layer).__name__))) outs = _convert_map[type(keras_layer).__name__](inexpr, keras_layer, etab) outs = _as_list(outs) for t_idx, out in enumerate(outs): name = outname + ":" + str(t_idx) etab.set_expr(name, out)
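keras_op_to_relay dispatches on the layer's class name through _convert_map, a dict of converter callables that each take (inexpr, keras_layer, etab) and return one or more Relay expressions. A schematic sketch of what one entry could look like for a ReLU activation; it is illustrative only and not the converter the TVM frontend actually registers.

from tvm import relay

def _convert_relu(inexpr, keras_layer, etab):
    # A converter receives the already-converted input expression(s),
    # the Keras layer instance and the shared expression table.
    return relay.nn.relu(inexpr)

# Registration happens in a module-level dict keyed by the Keras class name:
_convert_map = {
    "ReLU": _convert_relu,
}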
23,600
def get_irradiance(site_location, date, tilt, surface_azimuth): # Creates one day's worth of 10 min intervals times = pd.date_range(date, freq='10min', periods=6*24, tz=tz) # Generate cleaersky data using the Ineichen model, which is the default # The get_clearsky method returns a dataframe with values for GHI, DNI, # and DHI clearsky_ghi = site_location.get_clearsky(times) # Get solar azimuth and zenith to pass to the transposition function solar_position = site_location.get_solarposition(times=times) # Use the get_total_irradiance function to transpose the GHI to POA POA_irradiance = get_total_irradiance( surface_tilt=tilt, surface_azimuth=surface_azimuth, dni=clearsky_ghi['dni'], ghi=clearsky_ghi['ghi'], dhi=clearsky_ghi['dhi'], solar_zenith=solar_position['zenith'], solar_azimuth=solar_position['azimuth']) # Return DataFrame with only GHI and POA return pd.DataFrame({'GHI': clearsky_ghi['ghi'], 'POA': POA_irradiance['poa_global']})
def get_irradiance(site_location, date, tilt, surface_azimuth): # Creates one day's worth of 10 min intervals times = pd.date_range(date, freq='10min', periods=6*24, tz=tz) # Generate clearsky data using the Ineichen model, which is the default # The get_clearsky method returns a dataframe with values for GHI, DNI, # and DHI clearsky_ghi = site_location.get_clearsky(times) # Get solar azimuth and zenith to pass to the transposition function solar_position = site_location.get_solarposition(times=times) # Use the get_total_irradiance function to transpose the GHI to POA POA_irradiance = get_total_irradiance( surface_tilt=tilt, surface_azimuth=surface_azimuth, dni=clearsky_ghi['dni'], ghi=clearsky_ghi['ghi'], dhi=clearsky_ghi['dhi'], solar_zenith=solar_position['zenith'], solar_azimuth=solar_position['azimuth']) # Return DataFrame with only GHI and POA return pd.DataFrame({'GHI': clearsky_ghi['ghi'], 'POA': POA_irradiance['poa_global']})
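get_irradiance relies on a module-level tz and on pvlib's Location and get_total_irradiance. A minimal usage sketch with a made-up site (the coordinates, timezone and date are placeholders), assuming the helper above is in scope.

import pandas as pd
from pvlib import location
from pvlib.irradiance import get_total_irradiance

tz = 'US/Arizona'  # module-level timezone the helper above expects
site = location.Location(32.2, -110.9, tz=tz)

summer = get_irradiance(site, '2020-06-21', tilt=25, surface_azimuth=180)
print(summer.head())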
6,907
def get_messages_from_include_files(app_name=None): """Returns messages from js files included at time of boot like desk.min.js for desk and web""" messages = [] app_include_js = frappe.get_hooks("app_include_js", app_name=app_name) or [] web_include_js = frappe.get_hooks("web_include_js", app_name=app_name) or [] include_js = app_include_js + web_include_js for js_path in include_js: relative_path = os.path.join(frappe.local.sites_path, js_path.strip('/')) messages_from_file = get_messages_from_file(relative_path) messages.extend(messages_from_file) return messages
def get_messages_from_include_files(app_name=None): """Returns messages from js files included at time of boot like desk.min.js for desk and web""" messages = [] app_include_js = frappe.get_hooks("app_include_js", app_name=app_name) or [] web_include_js = frappe.get_hooks("web_include_js", app_name=app_name) or [] include_js = app_include_js + web_include_js for js_path in include_js: relative_path = os.path.join(frappe.local.sites_path, js_path.lstrip('/')) messages_from_file = get_messages_from_file(relative_path) messages.extend(messages_from_file) return messages
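The only change between the two versions above is strip('/') versus lstrip('/'): both drop the leading slash so os.path.join does not discard sites_path, but strip would also eat a trailing slash if an entry ever ended with one. A short illustration of why the leading slash matters; the paths are placeholders.

import os

sites_path = "./sites"                      # placeholder
js_path = "/assets/frappe/js/desk.min.js"   # hook entries typically start with '/'

# An absolute second argument makes os.path.join ignore the first one:
print(os.path.join(sites_path, js_path))              # /assets/frappe/js/desk.min.js
print(os.path.join(sites_path, js_path.lstrip('/')))  # ./sites/assets/frappe/js/desk.min.js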
26,017
def load_arguments(self, _): # Model imports DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes = self.get_models('HyperVGenerationTypes') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`", id_part='name') extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01') license_type = CLIArgumentType( help="Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for " "Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, " "use 'Windows_Client'. 
For more information see the Azure Windows VM online docs.", arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE', 'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC', 'None', 'RHEL_ELS_6'])) # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes') if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks') if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. 
If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. 
On-demand bursting is disabled by default, and it does not apply to Ultra disks.') c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.') c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 
4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.') c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. 
Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's data disk.") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type, ) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = "Name of the image builder run output." c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL." " Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'") c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.") c.argument('image_template_name', image_template_name_type, help="The name of the image template.") c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group="Image Source") ib_customizer_type = CLIArgumentType(arg_group="Customizer") ib_cutput_type = CLIArgumentType(arg_group="Output") c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. 
You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).') c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = "Space-separated list of regions to replicate the image version into." ib_img_location_help = "Location where the customized image will be created." c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.") c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.") c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"]) ib_default_loc_help = " Defaults to resource group's location." 
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart") ib_win_update_type = CLIArgumentType(arg_group="Windows Update") ib_script_type = CLIArgumentType(arg_group="Shell and Powershell") ib_powershell_type = CLIArgumentType(arg_group="Powershell") ib_file_customizer_type = CLIArgumentType(arg_group="File") c.argument('customizer_name', help="Name of the customizer.") c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.") c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.") c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. 
It can be a github link, SAS URI for Azure Storage, etc.") c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2") c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 
'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. 
This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. 
Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. 
It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help="The name or ID of the managed disk", id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('disks', nargs='*', help="One or more names or IDs of the managed disk (space-delimited).", completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm extension show') as c: c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help="image sku's version") c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. 
If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help="show all information including vm sizes not available under the current subscription") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group") c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help="Fault domain of the host within a group. 
Allowed values: 0, 1, 2") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help="Replace the host automatically if a failure occurs") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help="The software license type that will be applied to the VMs deployed on the dedicated host.") c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int, help="Number of fault domains that the host group can span.") c.argument('zones', zone_type) for scope in ["vm host", "vm host group"]: with self.argument_context("{} create".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = " Otherwise, location will default to the resource group's location" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings["help"] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group." 
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help="Specify the Microsoft.Network API version used when creating networking resources in the Network " "Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. 
Default " "value is 2020-11-01.") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. 
Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules") c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`") c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--vm-sku`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'") c.argument('scripts', nargs='+', help="Space-separated script lines. 
Use @{file} to load script from a file") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine') for scope in ['create', 'update']: with self.argument_context('vm run-command {}'.format(scope)) as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using "list" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.') c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') with self.argument_context('vm run-command delete') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vm run-command list') as c: c.argument('vm_name', run_cmd_vm_name, id_part=None) c.argument('expand', help='The expand expression to apply on the operation.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('vm run-command show') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') with self.argument_context('vm run-command wait') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') for scope in ['create', 'update']: with self.argument_context('vmss run-command {}'.format(scope)) as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using "list" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.') c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.') with self.argument_context('vmss run-command delete') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vmss run-command list') as c: c.argument('vmss_name', run_cmd_vmss_name, id_part=None) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('expand', help='The expand expression to apply on the operation.') with self.argument_context('vmss run-command show') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm application set', 'vmss application set']: with self.argument_context(scope) as c: c.argument('vm', existing_vm_name) c.argument('vmss_name', vmss_name_type) c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help="Space-separated application version ids to set to VM.") c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery applications. If specified, the first app version id get specified an order = 1, then next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; lowest order is installed first.') c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', help='Space-separated application configuration overrides for each application version ids. ' 'It should have the same number of items as the application version ids. 
Null is available for a application ' 'which does not have a configuration override.') for scope in ['vm application list', 'vmss application list']: with self.argument_context(scope) as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) c.argument('vmss_name', vmss_name_type, id_part=None) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) c.argument('accept_term', action='store_true', help="Accept the license agreement and privacy statement.") c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.') with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. 
"all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. {} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. 
If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted, i.e. whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether the data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. If multiple data disks are attached, please use "<data_disk>=Delete <data_disk2>=Detach" to configure each disk') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP can be created in an edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. 
Use a singular ' 'value to apply on all resources, or use <Name>=<Value> to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access. ") c.ignore('identity_role_id') for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', help='Role name or id the system assigned identity will have. ') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], help="Role name or id the system assigned identity will have") with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. 
A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help="the $orderby odata query option") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('license_type', license_type) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. 
Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.') with self.argument_context('vm update') as c: c.argument('license_type', license_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image 
publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), 
arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image definition') with self.argument_context('sig image-definition update') as c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions", arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. 
Use "null" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.') c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `<os_cvm_encryption_type>,<os_cvm_des>`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-version show') as c: c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. 
If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Gallery applications with self.argument_context('sig gallery-application') as c: c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], help='The name of the gallery Application') with self.argument_context('sig gallery-application create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' 'to specify the supported type of the OS that application is built for. <br><br> Possible values ' 'are: <br><br> **Windows** <br><br> **Linux**') with self.argument_context('sig gallery-application update') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') with self.argument_context('sig gallery-application version') as c: c.argument('gallery_application_name', options_list=['--application-name'], help='The name of the gallery Application') c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], help='The name of the gallery Application Version') for scope in ['create', 'update']: with self.argument_context('sig gallery-application version {}'.format(scope)) as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') c.argument('install_command', help='The path and arguments to install the gallery application.') c.argument('remove_command', help='The path and arguments to remove the gallery application.') c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' ' then update operation will invoke remove command on the previous version ' 'and install command on the current version of the gallery application.') c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' 'going to be replicated to. This property is updatable. Expected value: ' 'json-string/json-file/@json-file.') c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' 'deployed from the latest version of the Image Definition won\'t use this Image Version.', arg_group='Publishing Profile') c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' 'used for decommissioning purposes. 
This property is updatable.', arg_group='Publishing Profile') # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item), validator=_validate_proximity_placement_group) # Only the availability set commands lack a command-level validator, so the validator is attached here. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.") c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter is used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. 
Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion # region Capacity with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. 
If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group, which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instances that are associated with the capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instances that are associated with the capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be a single value and must be part of the list of zones specified during the capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces the VM/VMSS using this capacity reservation to be in the same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs to be reserved. Currently VM SKUs with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') # endregion # region Restore point with self.argument_context('restore-point') as c: c.argument('restore_point_collection_name', options_list=['--collection-name'], help='The name of the restore point collection.') with self.argument_context('restore-point create') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' 'customer wishes to exclude from the restore point. 
If no disks are specified, all disks will be ' 'included.') c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') with self.argument_context('restore-point show') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.') with self.argument_context('restore-point delete') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') with self.argument_context('restore-point wait') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') # endRegion # region Restore point collection with self.argument_context('restore-point collection create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', arg_group='Source') with self.argument_context('restore-point collection update') as c: c.argument('tags', tags_type) with self.argument_context('restore-point collection show') as c: c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.')
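The registrations above all follow one pattern: open an argument context for a command scope, then declare each parameter with c.argument(dest, options_list=..., help=..., type=..., ...). The sketch below only illustrates how such a declaration maps --option flags onto a handler's keyword arguments; the ArgumentContext class and the 'restore-point create' wiring are assumptions made for this example, not the actual azure-cli/knack implementation.

# Illustrative sketch (not azure-cli code): how scoped c.argument(...) calls
# could translate into an argparse parser for one command.
import argparse


class ArgumentContext:
    """Collects argument definitions for one command scope (illustration only)."""

    def __init__(self, parser: argparse.ArgumentParser):
        self._parser = parser

    def argument(self, dest, options_list=None, **kwargs):
        # Default the flag name from the destination, mirroring the pattern above.
        flags = options_list or ['--' + dest.replace('_', '-')]
        self._parser.add_argument(*flags, dest=dest, **kwargs)


def build_restore_point_create_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog='restore-point create')
    c = ArgumentContext(parser)
    c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
               help='The name of the restore point.')
    c.argument('exclude_disks', nargs='+',
               help='List of disk resource ids to exclude from the restore point.')
    return parser


if __name__ == '__main__':
    # Example invocation: flags land on the declared destinations.
    args = build_restore_point_create_parser().parse_args(
        ['--name', 'rp1', '--exclude-disks', 'disk1', 'disk2'])
    print(args.restore_point_name, args.exclude_disks)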
def load_arguments(self, _): # Model imports DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes = self.get_models('HyperVGenerationTypes') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`", id_part='name') extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01') license_type = CLIArgumentType( help="Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for " "Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, " "use 'Windows_Client'. 
For more information see the Azure Windows VM online docs.", arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE', 'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC', 'None', 'RHEL_ELS_6'])) # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes') if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks') if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. 
If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. 
On-demand bursting is disabled by default, and it does not apply to Ultra disks.') c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.') c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 
4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g. P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate whether the OS on a disk supports hibernation.', min_api='2020-12-01') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.') c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed.') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collapse all different image sources under 2 common arguments: --os-disk-source and --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. 
Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's data disk.") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = "Name of the image builder run output." c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL." " Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'") c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.") c.argument('image_template_name', image_template_name_type, help="The name of the image template.") c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group="Image Source") ib_customizer_type = CLIArgumentType(arg_group="Customizer") ib_cutput_type = CLIArgumentType(arg_group="Output") c.argument('build_timeout', type=int, help="The maximum duration to wait while building the image template, in minutes. Default is 60.") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. 
You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).') c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = "Space-separated list of regions to replicate the image version into." ib_img_location_help = "Location where the customized image will be created." c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.") c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.") c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"]) ib_default_loc_help = " Defaults to resource group's location." 
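        # Illustrative sketch (not from the original source): how the distributor arguments
        # registered in this 'image builder output add' block are typically combined on the
        # command line. The option names follow azure-cli's default dest-to-flag mapping, and
        # the resource group, template, gallery and image names are hypothetical placeholders.
        #
        #   az image builder output add -g my-rg -n my-template \
        #       --managed-image my-image --managed-image-location westus2
        #   az image builder output add -g my-rg -n my-template \
        #       --gallery-image-definition my-image-def --gallery-name my-gallery \
        #       --gallery-replication-regions westus2 eastus
        #   az image builder output add -g my-rg -n my-template --is-vhd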
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart") ib_win_update_type = CLIArgumentType(arg_group="Windows Update") ib_script_type = CLIArgumentType(arg_group="Shell and Powershell") ib_powershell_type = CLIArgumentType(arg_group="Powershell") ib_file_customizer_type = CLIArgumentType(arg_group="File") c.argument('customizer_name', help="Name of the customizer.") c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.") c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.") c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. 
It can be a github link, SAS URI for Azure Storage, etc.") c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2") c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 
'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. 
This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will be automatically updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. 
Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. 
It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help="The name or ID of the managed disk", id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('disks', nargs='*', help="One or more names or IDs of the managed disk (space-delimited).", completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm extension show') as c: c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help="image sku's version") c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. 
If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help="show all information including vm sizes not available under the current subscription") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group") c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help="Fault domain of the host within a group. 
Allowed values: 0, 1, 2") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help="Replace the host automatically if a failure occurs") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help="The software license type that will be applied to the VMs deployed on the dedicated host.") c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int, help="Number of fault domains that the host group can span.") c.argument('zones', zone_type) for scope in ["vm host", "vm host group"]: with self.argument_context("{} create".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = " Otherwise, location will default to the resource group's location" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings["help"] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group." 
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help="Specify the Microsoft.Network API version used when creating networking resources in the Network " "Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. 
Default " "value is 2020-11-01.") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. 
Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules") c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`") c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--vm-sku`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'") c.argument('scripts', nargs='+', help="Space-separated script lines. 
Use @{file} to load script from a file") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine') for scope in ['create', 'update']: with self.argument_context('vm run-command {}'.format(scope)) as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using "list" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.') c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') with self.argument_context('vm run-command delete') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vm run-command list') as c: c.argument('vm_name', run_cmd_vm_name, id_part=None) c.argument('expand', help='The expand expression to apply on the operation.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('vm run-command show') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') with self.argument_context('vm run-command wait') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') for scope in ['create', 'update']: with self.argument_context('vmss run-command {}'.format(scope)) as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using "list" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.') c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.') with self.argument_context('vmss run-command delete') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vmss run-command list') as c: c.argument('vmss_name', run_cmd_vmss_name, id_part=None) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('expand', help='The expand expression to apply on the operation.') with self.argument_context('vmss run-command show') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm application set', 'vmss application set']: with self.argument_context(scope) as c: c.argument('vm', existing_vm_name) c.argument('vmss_name', vmss_name_type) c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help="Space-separated application version ids to set to VM.") c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery application. If specified, the first app version id gets specified an order = 1, then the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; the lowest order is installed first.') c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', help='Space-separated application configuration overrides for each application version ids. ' 'It should have the same number of items as the application version ids. 
Null is available for a application ' 'which does not have a configuration override.') for scope in ['vm application list', 'vmss application list']: with self.argument_context(scope) as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) c.argument('vmss_name', vmss_name_type, id_part=None) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) c.argument('accept_term', action='store_true', help="Accept the license agreement and privacy statement.") c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.') with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. 
"all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. {} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. 
If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. For multiple data disks are attached, please use "<data_disk>=Delete <data_disk2>=Detach" to configure each disk') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. 
Use a singular ' 'value to apply on all resources, or use <Name>=<Value> to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access. ") c.ignore('identity_role_id') for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', help='Role name or id the system assigned identity will have. ') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], help="Role name or id the system assigned identity will have") with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. 
A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help="the $orderby odata query option") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('license_type', license_type) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. 
Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.') with self.argument_context('vm update') as c: c.argument('license_type', license_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image 
publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), 
arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image definition') with self.argument_context('sig image-definition update') as c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions", arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. 
Use "null" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.') c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `<os_cvm_encryption_type>,<os_cvm_des>`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-version show') as c: c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. 
If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Gallery applications with self.argument_context('sig gallery-application') as c: c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], help='The name of the gallery Application') with self.argument_context('sig gallery-application create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' 'to specify the supported type of the OS that application is built for. <br><br> Possible values ' 'are: <br><br> **Windows** <br><br> **Linux**') with self.argument_context('sig gallery-application update') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') with self.argument_context('sig gallery-application version') as c: c.argument('gallery_application_name', options_list=['--application-name'], help='The name of the gallery Application') c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], help='The name of the gallery Application Version') for scope in ['create', 'update']: with self.argument_context('sig gallery-application version {}'.format(scope)) as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') c.argument('install_command', help='The path and arguments to install the gallery application.') c.argument('remove_command', help='The path and arguments to remove the gallery application.') c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' ' then update operation will invoke remove command on the previous version ' 'and install command on the current version of the gallery application.') c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' 'going to be replicated to. This property is updatable. Expected value: ' 'json-string/json-file/@json-file.') c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' 'deployed from the latest version of the Image Definition won\'t use this Image Version.', arg_group='Publishing Profile') c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' 'used for decommissioning purposes. 
This property is updatable.', arg_group='Publishing Profile') # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.") c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. 
Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion # region Capacity with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. 
If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') # endRegion # region Restore point with self.argument_context('restore-point') as c: c.argument('restore_point_collection_name', options_list=['--collection-name'], help='The name of the restore point collection.') with self.argument_context('restore-point create') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' 'customer wishes to exclude from the restore point. 
If no disks are specified, all disks will be ' 'included.') c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') with self.argument_context('restore-point show') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.') with self.argument_context('restore-point delete') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') with self.argument_context('restore-point wait') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') # endRegion # region Restore point collection with self.argument_context('restore-point collection create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', arg_group='Source') with self.argument_context('restore-point collection update') as c: c.argument('tags', tags_type) with self.argument_context('restore-point collection show') as c: c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.')
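The argument registrations above define a CLIArgumentType once (for example run_cmd_name_type) and re-register it under several command scopes. A minimal, illustrative sketch of that reuse pattern follows; it assumes a knack-based command loader that provides self.argument_context, and the 'demo run-command' scopes are hypothetical names, not part of the excerpt above.

# Illustrative sketch only: the shared-CLIArgumentType pattern used above.
# The 'demo run-command' scopes are hypothetical; only the pattern matters.
from knack.arguments import CLIArgumentType

run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'],
                                    help='The name of the virtual machine run command.')


def load_arguments(self, _):
    # Register the same argument definition under both create and update,
    # mirroring the for-loop over scopes in the excerpt above.
    for scope in ['demo run-command create', 'demo run-command update']:
        with self.argument_context(scope) as c:
            c.argument('run_command_name', run_cmd_name_type)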
31,106
def execute_fetch_incidents_command(client):
    """
    runs the fetch incidents task. It will check the fetch_interval first before making any API calls to the
    MobileIron API's

    :type client: ``Client``
    :param client: MobileIron client to use
    """
    params = demisto.params()
    last_run = demisto.getLastRun()
    datetime_now = datetime.utcnow()
    datetime_now_iso = datetime_now.isoformat()
    fetch_interval = int(params.get('fetch_interval'))

    demisto.debug(f'MobileIron UEM - last run {last_run} now_utc {datetime_now_iso} fetch_interval {fetch_interval}')

    should_run = should_run_fetch_incidents(last_run, datetime_now, fetch_interval)

    if should_run:
        admin_space_id = params.get('admin_space_id')
        incident_type = params.get('incidentType')
        max_fetch = int(params.get('max_fetch'))
        incidents = fetch_incidents(client=client, admin_space_id=admin_space_id, incident_type=incident_type,
                                    max_fetch=max_fetch)
        demisto.incidents(incidents)
        demisto.setLastRun({'time': datetime_now_iso})
    else:
        demisto.incidents([])
def execute_fetch_incidents_command(client):
    """
    runs the fetch incidents task. It will check the fetch_interval first before making any API calls to the
    MobileIron API's

    :type client: ``Client``
    :param client: MobileIron client to use
    """
    params = demisto.params()
    last_run = demisto.getLastRun()
    datetime_now = datetime.utcnow()
    datetime_now_iso = datetime_now.isoformat()
    fetch_interval = int(params.get('fetch_interval'))

    demisto.debug(f'MobileIron UEM - last run {last_run} now_utc {datetime_now_iso} fetch_interval {fetch_interval}')

    should_run = should_run_fetch_incidents(last_run, datetime_now, fetch_interval)

    if should_run:
        admin_space_id = params.get('admin_space_id')
        incident_type = params.get('incidentType')
        max_fetch = min(int(params.get('max_fetch')), 200)
        incidents = fetch_incidents(client=client, admin_space_id=admin_space_id, incident_type=incident_type,
                                    max_fetch=max_fetch)
        demisto.incidents(incidents)
        demisto.setLastRun({'time': datetime_now_iso})
    else:
        demisto.incidents([])
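The modified version above clamps the configured max_fetch to a hard limit of 200 before fetching. A minimal sketch of that clamping, with made-up values and no MobileIron/demisto dependencies; the helper name is illustrative only.

# Illustrative only: the cap applied in the modified version above.
def capped_max_fetch(raw_value: str, cap: int = 200) -> int:
    # Clamp the configured max_fetch to the hard limit used above.
    return min(int(raw_value), cap)


assert capped_max_fetch('50') == 50     # below the cap: unchanged
assert capped_max_fetch('1000') == 200  # above the cap: clamped to 200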
28,283
def guids_from_list_str(s: str) -> Optional[Tuple[str, ...]]:
    """
    Get tuple of guids from a python/json string representation of a list.

    Extracts the guids from a string representation of a list, tuple, or set
    of guids or a single guid.

    Args:
        s: input string

    Returns:
        Extracted guids as a tuple of strings. If a provided string does not
        match the format, `None` will be returned. For an empty
        list/tuple/set or empty string an empty set is returned.

    Examples:
        >>> guids_from_str(
                "['07fd7195-c51e-44d6-a085-fa8274cf00d6', \
                  '070d7195-c51e-44d6-a085-fa8274cf00d6']")
        will return ('07fd7195-c51e-44d6-a085-fa8274cf00d6',
                     '070d7195-c51e-44d6-a085-fa8274cf00d6')
    """
    parsed = (ast.parse(s, mode='eval')).body

    if not isinstance(parsed, (ast.List, ast.Tuple, ast.Set)):
        return None

    if not all([isinstance(e, ast.Constant) for e in parsed.elts]):
        return None

    return tuple (v.value for v in parsed.elts)
def guids_from_list_str(s: str) -> Optional[Tuple[str, ...]]:
    """
    Get tuple of guids from a python/json string representation of a list.

    Extracts the guids from a string representation of a list, tuple, or set
    of guids or a single guid.

    Args:
        s: input string

    Returns:
        Extracted guids as a tuple of strings. If a provided string does not
        match the format, `None` will be returned. For an empty
        list/tuple/set or empty string an empty set is returned.

    Examples:
        >>> guids_from_str(
                "['07fd7195-c51e-44d6-a085-fa8274cf00d6', \
                  '070d7195-c51e-44d6-a085-fa8274cf00d6']")
        will return ('07fd7195-c51e-44d6-a085-fa8274cf00d6',
                     '070d7195-c51e-44d6-a085-fa8274cf00d6')
    """
    parsed = (ast.parse(s, mode='eval')).body

    if not isinstance(parsed, (ast.List, ast.Tuple, ast.Set)):
        return None

    if not all([isinstance(e, ast.Constant) for e in parsed.elts]):
        return None

    return tuple(v.value for v in parsed.elts)
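A short usage sketch for guids_from_list_str as defined above; it assumes the function (and its ast import) is available in scope, and the guid values are arbitrary examples.

# Illustrative usage of guids_from_list_str as defined above.
guids = guids_from_list_str(
    "['07fd7195-c51e-44d6-a085-fa8274cf00d6', '070d7195-c51e-44d6-a085-fa8274cf00d6']")
assert guids == ('07fd7195-c51e-44d6-a085-fa8274cf00d6',
                 '070d7195-c51e-44d6-a085-fa8274cf00d6')
assert guids_from_list_str('[]') == ()              # empty list -> empty tuple
assert guids_from_list_str("'not-a-list'") is None  # single constant -> None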
47,273
def export(
    tokenizer: PreTrainedTokenizer,
    model: Union[PreTrainedModel, TFPreTrainedModel],
    config: OnnxConfig,
    opset: int,
    output: Path,
) -> Tuple[List[str], List[str]]:
    """
    Export a PyTorch/Tensorflow backed pipeline to ONNX Intermediate Representation (IR)

    Args:
        tokenizer:
        model:
        config:
        opset:
        output:

    Returns:

    """
    from ..file_utils import torch_version

    if not (is_torch_available() or is_tf_available()):
        raise ImportError(
            "Cannot convert because neither PyTorch nor Tensorflow are not installed. Please install torch or tensorflow first."
        )

    if is_torch_available():
        if not is_torch_onnx_dict_inputs_support_available():
            raise AssertionError(f"Unsupported PyTorch version, minimum required is 1.8.0, got: {torch_version}")

    if issubclass(type(model), PreTrainedModel):
        import torch
        from torch.onnx import export

        logger.info(f"Using framework PyTorch: {torch.__version__}")
        with torch.no_grad():
            model.config.return_dict = True
            model.eval()

            # Check if we need to override certain configuration item
            if config.values_override is not None:
                logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
                for override_config_key, override_config_value in config.values_override.items():
                    logger.info(f"\t- {override_config_key} -> {override_config_value}")
                    setattr(model.config, override_config_key, override_config_value)

            # Ensure inputs match
            # TODO: Check when exporting QA we provide "is_pair=True"
            model_inputs = config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
            inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
            onnx_outputs = list(config.outputs.keys())

            if not inputs_match:
                raise ValueError("Model and config inputs doesn't match")

            config.patch_ops()

            # export can works with named args but the dict containing named args as to be last element of the args tuple
            export(
                model,
                (model_inputs,),
                f=output.as_posix(),
                input_names=list(config.inputs.keys()),
                output_names=onnx_outputs,
                dynamic_axes={name: axes for name, axes in chain(config.inputs.items(), config.outputs.items())},
                do_constant_folding=True,
                use_external_data_format=config.use_external_data_format(model.num_parameters()),
                enable_onnx_checker=True,
                opset_version=opset,
            )

            config.restore_ops()

            return matched_inputs, onnx_outputs
    else:
        import tensorflow as tf

        import onnx
        import tf2onnx

        model.config.return_dict = True

        # Check if we need to override certain configuration item
        if config.values_override is not None:
            logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
            for override_config_key, override_config_value in config.values_override.items():
                logger.info(f"\t- {override_config_key} -> {override_config_value}")
                setattr(model.config, override_config_key, override_config_value)

        # Ensure inputs match
        model_inputs = config.generate_dummy_inputs(tokenizer, framework=TensorType.TENSORFLOW)
        inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
        onnx_outputs = list(config.outputs.keys())

        input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in model_inputs.items()]
        onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset)
        onnx.save(onnx_model, output.as_posix())
        config.restore_ops()

        return matched_inputs, onnx_outputs
def export(
    tokenizer: PreTrainedTokenizer,
    model: Union[PreTrainedModel, TFPreTrainedModel],
    config: OnnxConfig,
    opset: int,
    output: Path,
) -> Tuple[List[str], List[str]]:
    """
    Export a PyTorch or TensorFlow model to ONNX Intermediate Representation (IR)

    Args:
        tokenizer:
        model:
        config:
        opset:
        output:

    Returns:

    """
    from ..file_utils import torch_version

    if not (is_torch_available() or is_tf_available()):
        raise ImportError(
            "Cannot convert because neither PyTorch nor Tensorflow are not installed. Please install torch or tensorflow first."
        )

    if is_torch_available():
        if not is_torch_onnx_dict_inputs_support_available():
            raise AssertionError(f"Unsupported PyTorch version, minimum required is 1.8.0, got: {torch_version}")

    if issubclass(type(model), PreTrainedModel):
        import torch
        from torch.onnx import export

        logger.info(f"Using framework PyTorch: {torch.__version__}")
        with torch.no_grad():
            model.config.return_dict = True
            model.eval()

            # Check if we need to override certain configuration item
            if config.values_override is not None:
                logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
                for override_config_key, override_config_value in config.values_override.items():
                    logger.info(f"\t- {override_config_key} -> {override_config_value}")
                    setattr(model.config, override_config_key, override_config_value)

            # Ensure inputs match
            # TODO: Check when exporting QA we provide "is_pair=True"
            model_inputs = config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
            inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
            onnx_outputs = list(config.outputs.keys())

            if not inputs_match:
                raise ValueError("Model and config inputs doesn't match")

            config.patch_ops()

            # export can works with named args but the dict containing named args as to be last element of the args tuple
            export(
                model,
                (model_inputs,),
                f=output.as_posix(),
                input_names=list(config.inputs.keys()),
                output_names=onnx_outputs,
                dynamic_axes={name: axes for name, axes in chain(config.inputs.items(), config.outputs.items())},
                do_constant_folding=True,
                use_external_data_format=config.use_external_data_format(model.num_parameters()),
                enable_onnx_checker=True,
                opset_version=opset,
            )

            config.restore_ops()

            return matched_inputs, onnx_outputs
    else:
        import tensorflow as tf

        import onnx
        import tf2onnx

        model.config.return_dict = True

        # Check if we need to override certain configuration item
        if config.values_override is not None:
            logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
            for override_config_key, override_config_value in config.values_override.items():
                logger.info(f"\t- {override_config_key} -> {override_config_value}")
                setattr(model.config, override_config_key, override_config_value)

        # Ensure inputs match
        model_inputs = config.generate_dummy_inputs(tokenizer, framework=TensorType.TENSORFLOW)
        inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
        onnx_outputs = list(config.outputs.keys())

        input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in model_inputs.items()]
        onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset)
        onnx.save(onnx_model, output.as_posix())
        config.restore_ops()

        return matched_inputs, onnx_outputs
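A hedged usage sketch for the exporter above, driving it with a PyTorch checkpoint. The BertOnnxConfig import path and constructor are assumptions that vary across transformers versions; the opset and output path are arbitrary example values.

# Illustrative only: calls the export() defined above.
# BertOnnxConfig's location/constructor are assumptions; adjust per version.
from pathlib import Path

from transformers import AutoModel, AutoTokenizer
from transformers.models.bert import BertOnnxConfig  # assumed import path

checkpoint = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint)
onnx_config = BertOnnxConfig(model.config)  # assumed constructor signature

matched_inputs, onnx_outputs = export(
    tokenizer, model, onnx_config, opset=12, output=Path("bert.onnx"))
print(matched_inputs, onnx_outputs)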