from typing import Dict, Tuple

import torch
import torch.nn as nn

from neuralhydrology.modelzoo.basemodel import BaseModel
from neuralhydrology.modelzoo.fc import FC
from neuralhydrology.modelzoo.head import get_head
from neuralhydrology.utils.config import Config


class EALSTM(BaseModel):
    """Entity-Aware LSTM (EA-LSTM) model class.

    This model has been proposed by Kratzert et al. [#]_ as a variant of the standard LSTM. The main
    difference is that the input gate of the EA-LSTM is modulated using only the static inputs, while the
    dynamic (time series) inputs are used in all other parts of the model (i.e. forget gate, cell update
    gate and output gate).
    To control the initial forget gate bias, use the config argument `initial_forget_bias`. Often it is
    useful to set this value to a positive value at the start of the model training, to keep the forget
    gate closed and to facilitate the gradient flow.
    The `EALSTM` class only supports single-timescale predictions. Use `MTSLSTM` to train an LSTM-based
    model and get predictions on multiple temporal resolutions at the same time.

    Parameters
    ----------
    cfg : Config
        The run configuration.

    References
    ----------
    .. [#] Kratzert, F., Klotz, D., Shalev, G., Klambauer, G., Hochreiter, S., and Nearing, G.: Towards
        learning universal, regional, and local hydrological behaviors via machine learning applied to
        large-sample datasets, Hydrol. Earth Syst. Sci., 23, 5089–5110,
        https://doi.org/10.5194/hess-23-5089-2019, 2019.
    """
    # specify submodules of the model that can later be used for finetuning. Names must match class attributes
    module_parts = ['input_gate', 'dynamic_gates', 'head']

    def __init__(self, cfg: Config):
        super(EALSTM, self).__init__(cfg=cfg)
        self._hidden_size = cfg.hidden_size

        input_size_stat = len(cfg.static_inputs + cfg.camels_attributes + cfg.hydroatlas_attributes)
        if cfg.use_basin_id_encoding:
            input_size_stat += cfg.number_of_basins

        # If hidden units for an embedding network are specified, create an FC network, otherwise a single linear layer
        if cfg.embedding_hiddens:
            self.input_gate = FC(cfg=cfg)
        else:
            self.input_gate = nn.Linear(input_size_stat, cfg.hidden_size)

        # create tensors of learnable parameters
        self.dynamic_gates = _DynamicGates(cfg=cfg)
        self.dropout = nn.Dropout(p=cfg.output_dropout)

        self.head = get_head(cfg=cfg, n_in=cfg.hidden_size, n_out=self.output_size)

    def _cell(self, x: torch.Tensor, i: torch.Tensor,
              states: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Single time step logic of EA-LSTM cell"""
        h_0, c_0 = states

        # calculate gates
        gates = self.dynamic_gates(h_0, x)
        f, o, g = gates.chunk(3, 1)

        c_1 = torch.sigmoid(f) * c_0 + i * torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)

        return h_1, c_1

    def forward(self, data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Perform a forward pass on the EA-LSTM model.

        Parameters
        ----------
        data : Dict[str, torch.Tensor]
            Dictionary, containing input features as key-value pairs.

        Returns
        -------
        Dict[str, torch.Tensor]
            Model outputs and intermediate states as a dictionary.
                - `y_hat`: model predictions of shape [batch size, sequence length, number of target variables].
                - `h_n`: hidden states for each time step of shape [batch size, sequence length, hidden size].
                - `c_n`: cell states for each time step of shape [batch size, sequence length, hidden size].
        """
        # transpose to [seq_length, batch_size, n_features]
        x_d = data['x_d'].transpose(0, 1)

        if 'x_s' in data and 'x_one_hot' in data:
            x_s = torch.cat([data['x_s'], data['x_one_hot']], dim=-1)
        elif 'x_s' in data:
            x_s = data['x_s']
        elif 'x_one_hot' in data:
            x_s = data['x_one_hot']
        else:
            raise ValueError('Need x_s or x_one_hot in forward pass.')

        # TODO: move hidden and cell state initialization to init and only reset states in forward pass to zero.
        h_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_()
        c_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_()

        # empty lists to temporally store all intermediate hidden/cell states
        h_n, c_n = [], []

        # calculate input gate only once because inputs are static
        i = torch.sigmoid(self.input_gate(x_s))

        # perform forward steps over input sequence
        for x_dt in x_d:
            h_t, c_t = self._cell(x_dt, i, (h_t, c_t))

            # store intermediate hidden/cell state in list
            h_n.append(h_t)
            c_n.append(c_t)

        h_n = torch.stack(h_n, 0).transpose(0, 1)
        c_n = torch.stack(c_n, 0).transpose(0, 1)

        pred = {'h_n': h_n, 'c_n': c_n}
        pred.update(self.head(self.dropout(h_n)))
        return pred


class _DynamicGates(nn.Module):
    """Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module"""

    def __init__(self, cfg: Config):
        super(_DynamicGates, self).__init__()
        self.cfg = cfg
        self.weight_ih = nn.Parameter(torch.FloatTensor(len(cfg.dynamic_inputs), 3 * cfg.hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(cfg.hidden_size, 3 * cfg.hidden_size))
        self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size))

        # initialize parameters
        self._reset_parameters()

    def _reset_parameters(self):
        """Special initialization of certain model weights."""
        nn.init.orthogonal_(self.weight_ih.data)

        weight_hh_data = torch.eye(self.cfg.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 3)
        self.weight_hh.data = weight_hh_data

        nn.init.constant_(self.bias.data, val=0)

        if self.cfg.initial_forget_bias is not None:
            self.bias.data[:self.cfg.hidden_size] = self.cfg.initial_forget_bias

    def forward(self, h: torch.Tensor, x_d: torch.Tensor):
        gates = h @ self.weight_hh + x_d @ self.weight_ih + self.bias
        return gates
--- Source: visr/neuralhydrology / neuralhydrology/modelzoo/ealstm.py (BSD-3-Clause, Python) ---
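For orientation, a minimal standalone sketch of the EA-LSTM recurrence implemented above, with hypothetical sizes (batch 4, sequence length 10, 5 dynamic and 3 static features, hidden size 8); it bypasses the Config/BaseModel plumbing and exercises only the cell math:

import torch

batch, seq_len, n_dyn, n_stat, hidden = 4, 10, 5, 3, 8
x_d = torch.randn(seq_len, batch, n_dyn)   # dynamic (time series) inputs
x_s = torch.randn(batch, n_stat)           # static inputs

input_gate = torch.nn.Linear(n_stat, hidden)
w_ih = torch.randn(n_dyn, 3 * hidden)      # untrained stand-ins for _DynamicGates
w_hh = torch.randn(hidden, 3 * hidden)
bias = torch.zeros(3 * hidden)

i = torch.sigmoid(input_gate(x_s))         # computed once: the inputs are static
h_t = torch.zeros(batch, hidden)
c_t = torch.zeros(batch, hidden)
for x_dt in x_d:
    f, o, g = (h_t @ w_hh + x_dt @ w_ih + bias).chunk(3, 1)
    c_t = torch.sigmoid(f) * c_t + i * torch.tanh(g)
    h_t = torch.sigmoid(o) * torch.tanh(c_t)
print(h_t.shape)  # torch.Size([4, 8])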
import logging
from pathlib import Path

from taskcat._config import Config
from taskcat.iam_policy.policy import CFNPolicyGenerator

LOG = logging.getLogger(__name__)


class GenerateIAMPolicy:
    """
    [ALPHA] Introspects CFN Template(s) and generates an IAM policy necessary to
    successfully launch the template(s)
    """

    CLINAME = "generate-iam-policy"

    def __init__(
        self, output_file: str = "./cfn_stack_policy.json", project_root: str = "./"
    ):
        project_root_path = Path(project_root).expanduser().resolve()
        config = Config.create(project_root=project_root_path)
        CFNPolicyGenerator(config, output_file).generate_policy()
--- Source: GlennChia/taskcat / taskcat/_cli_modules/generate_iam_policy.py (Apache-2.0, Python) ---
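A hypothetical invocation of the class above; it assumes a project directory containing a valid .taskcat.yml, which Config.create reads:

from taskcat._cli_modules.generate_iam_policy import GenerateIAMPolicy

# Writes ./cfn_stack_policy.json for the templates referenced by the project config.
GenerateIAMPolicy(output_file="./cfn_stack_policy.json", project_root="./my-project")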
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import os
from functools import partial
import logging
import time

import paddle
import paddle.fluid as fluid
import argparse
import network
import reader

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)


def parse_args():
    parser = argparse.ArgumentParser("gnn")
    parser.add_argument(
        '--train_path', type=str, default='./data/diginetica/train.txt',
        help='path of training data')
    parser.add_argument(
        '--config_path', type=str, default='./data/diginetica/config.txt',
        help='path of config')
    parser.add_argument(
        '--model_path', type=str, default='./saved_model',
        help='path of model parameters')
    parser.add_argument(
        '--epoch_num', type=int, default=30,
        help='number of epochs to train for')
    parser.add_argument(
        '--batch_size', type=int, default=100, help='input batch size')
    parser.add_argument(
        '--hidden_size', type=int, default=100, help='hidden state size')
    parser.add_argument(
        '--l2', type=float, default=1e-5, help='l2 penalty')
    parser.add_argument(
        '--lr', type=float, default=0.001, help='learning rate')
    parser.add_argument(
        '--step', type=int, default=1, help='gnn propagation steps')
    parser.add_argument(
        '--lr_dc', type=float, default=0.1, help='learning rate decay rate')
    parser.add_argument(
        '--lr_dc_step', type=int, default=3,
        help='the number of epochs after which the learning rate decays')
    parser.add_argument(
        '--use_cuda', type=int, default=0, help='whether to use gpu')
    parser.add_argument(
        '--use_parallel', type=int, default=1,
        help='whether to use parallel executor')
    parser.add_argument(
        '--enable_ce', action='store_true',
        help='If set, run the task with continuous evaluation logs.')
    return parser.parse_args()


def train():
    args = parse_args()
    if args.enable_ce:
        SEED = 102
        fluid.default_main_program().random_seed = SEED
        fluid.default_startup_program().random_seed = SEED

    batch_size = args.batch_size
    items_num = reader.read_config(args.config_path)
    loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size, args.step)

    data_reader = reader.Data(args.train_path, True)
    logger.info("load data complete")

    use_cuda = True if args.use_cuda else False
    use_parallel = True if args.use_parallel else False
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    exe = fluid.Executor(place)
    step_per_epoch = data_reader.length // batch_size
    optimizer = fluid.optimizer.Adam(
        learning_rate=fluid.layers.exponential_decay(
            learning_rate=args.lr,
            decay_steps=step_per_epoch * args.lr_dc_step,
            decay_rate=args.lr_dc),
        regularization=fluid.regularizer.L2DecayRegularizer(
            regularization_coeff=args.l2))
    optimizer.minimize(loss)

    exe.run(fluid.default_startup_program())

    all_vocab = fluid.global_scope().var("all_vocab").get_tensor()
    all_vocab.set(
        np.arange(1, items_num).astype("int64").reshape((-1, 1)), place)

    feed_list = [e.name for e in feed_datas]

    if use_parallel:
        train_exe = fluid.ParallelExecutor(
            use_cuda=use_cuda, loss_name=loss.name)
    else:
        train_exe = exe

    logger.info("begin train")

    total_time = []
    ce_info = []
    start_time = time.time()
    loss_sum = 0.0
    acc_sum = 0.0
    global_step = 0
    PRINT_STEP = 500
    py_reader.decorate_paddle_reader(data_reader.reader(batch_size, batch_size * 20, True))
    for i in range(args.epoch_num):
        epoch_sum = []
        py_reader.start()
        try:
            while True:
                res = train_exe.run(fetch_list=[loss.name, acc.name])
                loss_sum += res[0].mean()
                acc_sum += res[1].mean()
                epoch_sum.append(res[0].mean())
                global_step += 1
                if global_step % PRINT_STEP == 0:
                    ce_info.append([loss_sum / PRINT_STEP, acc_sum / PRINT_STEP])
                    total_time.append(time.time() - start_time)
                    logger.info("global_step: %d, loss: %.4lf, train_acc: %.4lf" % (
                        global_step, loss_sum / PRINT_STEP, acc_sum / PRINT_STEP))
                    loss_sum = 0.0
                    acc_sum = 0.0
                    start_time = time.time()
        except fluid.core.EOFException:
            py_reader.reset()
            logger.info("epoch loss: %.4lf" % (np.mean(epoch_sum)))
            save_dir = os.path.join(args.model_path, "epoch_" + str(i))
            fetch_vars = [loss, acc]
            fluid.io.save_inference_model(save_dir, feed_list, fetch_vars, exe)
            logger.info("model saved in " + save_dir)

    # only for ce
    if args.enable_ce:
        gpu_num = get_cards(args)
        ce_loss = 0
        ce_acc = 0
        ce_time = 0
        try:
            ce_loss = ce_info[-1][0]
            ce_acc = ce_info[-1][1]
            ce_time = total_time[-1]
        except:
            print("ce info error")
        print("kpis\teach_pass_duration_card%s\t%s" % (gpu_num, ce_time))
        print("kpis\ttrain_loss_card%s\t%f" % (gpu_num, ce_loss))
        print("kpis\ttrain_acc_card%s\t%f" % (gpu_num, ce_acc))


def get_cards(args):
    num = 0
    cards = os.environ.get('CUDA_VISIBLE_DEVICES')
    num = len(cards.split(","))
    return num


if __name__ == "__main__":
    train()
--- Source: GitHubpenglix666/models / PaddleRec/gnn/train.py (Apache-2.0, Python) ---
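As a side note, the exponential_decay schedule configured in train() follows the standard formula lr = base_lr * decay_rate^(step / decay_steps). A plain-Python illustration of that formula with the script's defaults (not Paddle API; step_per_epoch is hypothetical here):

def lr_at(global_step, base_lr=0.001, lr_dc=0.1, lr_dc_step=3, step_per_epoch=1000):
    # Continuous (staircase=False) exponential decay, as used in train() above.
    decay_steps = step_per_epoch * lr_dc_step
    return base_lr * lr_dc ** (global_step / decay_steps)

print(lr_at(0))     # 0.001
print(lr_at(3000))  # 0.0001, i.e. one full decay period later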
""" This module provides two implementations for the rod-cutting problem: 1. A naive recursive implementation which has an exponential runtime 2. Two dynamic programming implementations which have quadratic runtime The rod-cutting problem is the problem of finding the maximum possible revenue obtainable from a rod of length ``n`` given a list of prices for each integral piece of the rod. The maximum revenue can thus be obtained by cutting the rod and selling the pieces separately or not cutting it at all if the price of it is the maximum obtainable. """ def naive_cut_rod_recursive(n: int, prices: list): """ Solves the rod-cutting problem via naively without using the benefit of dynamic programming. The results is the same sub-problems are solved several times leading to an exponential runtime Runtime: O(2^n) Arguments ------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. Examples -------- >>> naive_cut_rod_recursive(4, [1, 5, 8, 9]) 10 >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 """ _enforce_args(n, prices) if n == 0: return 0 max_revue = float("-inf") for i in range(1, n + 1): max_revue = max( max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices) ) return max_revue def top_down_cut_rod(n: int, prices: list): """ Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. This function serves as a wrapper for _top_down_cut_rod_recursive Runtime: O(n^2) Arguments -------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Note ---- For convenience and because Python's lists using 0-indexing, length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of length 0. Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. Examples ------- >>> top_down_cut_rod(4, [1, 5, 8, 9]) 10 >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 """ _enforce_args(n, prices) max_rev = [float("-inf") for _ in range(n + 1)] return _top_down_cut_rod_recursive(n, prices, max_rev) def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list): """ Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. Runtime: O(n^2) Arguments -------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` max_rev: list, the computed maximum revenue for a piece of rod. ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i`` Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. """ if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: max_revenue = float("-inf") for i in range(1, n + 1): max_revenue = max( max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), ) max_rev[n] = max_revenue return max_rev[n] def bottom_up_cut_rod(n: int, prices: list): """ Constructs a bottom-up dynamic programming solution for the rod-cutting problem Runtime: O(n^2) Arguments ---------- n: int, the maximum length of the rod. prices: list, the prices for each piece of rod. 
``p[i-i]`` is the price for a rod of length ``i`` Returns ------- The maximum revenue obtainable from cutting a rod of length n given the prices for each piece of rod p. Examples ------- >>> bottom_up_cut_rod(4, [1, 5, 8, 9]) 10 >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 """ _enforce_args(n, prices) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. max_rev = [float("-inf") for _ in range(n + 1)] max_rev[0] = 0 for i in range(1, n + 1): max_revenue_i = max_rev[i] for j in range(1, i + 1): max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j]) max_rev[i] = max_revenue_i return max_rev[n] def _enforce_args(n: int, prices: list): """ Basic checks on the arguments to the rod-cutting algorithms n: int, the length of the rod prices: list, the price list for each piece of rod. Throws ValueError: if n is negative or there are fewer items in the price list than the length of the rod """ if n < 0: raise ValueError(f"n must be greater than or equal to 0. Got n = {n}") if n > len(prices): raise ValueError( "Each integral piece of rod must have a corresponding " f"price. Got n = {n} but length of prices = {len(prices)}" ) def main(): prices = [6, 10, 12, 15, 20, 23] n = len(prices) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. expected_max_revenue = 36 max_rev_top_down = top_down_cut_rod(n, prices) max_rev_bottom_up = bottom_up_cut_rod(n, prices) max_rev_naive = naive_cut_rod_recursive(n, prices) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
--- Source: AlgorithmAndLeetCode/TheAlgorithms-Python / dynamic_programming/rod_cutting.py (MIT, Python) ---
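A small extension sketch (not part of the module above, and assuming non-negative prices): the same bottom-up recurrence can also record which first cut achieves each optimum, so the actual cut lengths can be recovered alongside the revenue:

def bottom_up_cut_rod_with_cuts(n, prices):
    # max_rev[i] is the best revenue for length i; first_cut[i] is the piece
    # length of one optimal first cut. Zero-init is safe for non-negative prices.
    max_rev = [0] * (n + 1)
    first_cut = [0] * (n + 1)
    for i in range(1, n + 1):
        for j in range(1, i + 1):
            if prices[j - 1] + max_rev[i - j] > max_rev[i]:
                max_rev[i] = prices[j - 1] + max_rev[i - j]
                first_cut[i] = j
    # Walk the first_cut table to reconstruct one optimal set of pieces.
    cuts, rest = [], n
    while rest > 0:
        cuts.append(first_cut[rest])
        rest -= first_cut[rest]
    return max_rev[n], cuts

print(bottom_up_cut_rod_with_cuts(4, [1, 5, 8, 9]))  # (10, [2, 2])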
# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

from pandapower.plotting.geo import _node_geometries_from_geodata, \
    _transform_node_geometry_to_geodata, _branch_geometries_from_geodata, \
    _transform_branch_geometry_to_coords, _convert_xy_epsg


def convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True):
    """
    Extracts information on junction and pipe geodata from the geometries of a geopandas
    geodataframe.

    :param net: The net for which to convert the geodata
    :type net: pandapipesNet
    :param node_geodata: flag if to extract x and y values for junction geodata
    :type node_geodata: bool, default True
    :param branch_geodata: flag if to extract coordinates values for pipe geodata
    :type branch_geodata: bool, default True
    :return: No output.
    """
    if node_geodata:
        _transform_node_geometry_to_geodata(net.junction_geodata)
    if branch_geodata:
        _transform_branch_geometry_to_coords(net.pipe_geodata)


def convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True):
    """
    Transforms the bus and line geodata of a net into a geopandas geodataframe with the
    respective geometries.

    :param net: The net for which to convert the geodata
    :type net: pandapowerNet
    :param epsg: current epsg projection
    :type epsg: int, default 31467 (= Gauss-Krüger Zone 3)
    :param node_geodata: flag if to transform the bus geodata table
    :type node_geodata: bool, default True
    :param branch_geodata: flag if to transform the line geodata table
    :type branch_geodata: bool, default True
    :return: No output.
    """
    if node_geodata:
        net["bus_geodata"] = _node_geometries_from_geodata(net["bus_geodata"], epsg)
    if branch_geodata:
        net["line_geodata"] = _branch_geometries_from_geodata(net["line_geodata"], epsg)
    net["gis_epsg_code"] = epsg


def convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467):
    """
    Converts bus geodata in net from epsg_in to epsg_out

    :param net: The pandapower network
    :type net: pandapowerNet
    :param epsg_in: current epsg projection
    :type epsg_in: int, default 4326 (= WGS84)
    :param epsg_out: epsg projection to be transformed to
    :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)
    :return: net - the given pandapower network (no copy!)
    """
    net['bus_geodata'].loc[:, "x"], net['bus_geodata'].loc[:, "y"] = _convert_xy_epsg(
        net['bus_geodata'].loc[:, "x"], net['bus_geodata'].loc[:, "y"], epsg_in, epsg_out)
    return net
--- Source: Fraank-dash/pandapipes / pandapipes/plotting/geo.py (BSD-3-Clause, Python) ---
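A hedged usage sketch for convert_epsg_bus_geodata above, assuming pandapower and pyproj are installed: a one-bus net with WGS84 lon/lat geodata is reprojected in place to Gauss-Krüger zone 3:

import pandapower as pp

net = pp.create_empty_network()
pp.create_bus(net, vn_kv=0.4, geodata=(8.4, 49.0))  # (x, y) in epsg:4326
net = convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467)
print(net.bus_geodata)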
import numpy as np
import pytest

from allopy import PortfolioRegretOptimizer, RegretOptimizer, get_option

from .data import Test1, Test2, assets, scenarios
from .funcs import cvar_fun, obj_max_returns


@pytest.mark.parametrize("config", [Test1, Test2])
def test_regret_optimizer(config, main_cubes, cvar_cubes):
    opt = RegretOptimizer(len(assets), len(scenarios), config.prob.as_array(), sum_to_1=True)
    opt.set_bounds(config.lb.as_array(), config.ub.as_array())

    obj_funcs, constraint_funcs = [], []
    for i, scenario in enumerate(scenarios):
        obj_funcs.append(obj_max_returns(main_cubes[i]))
        constraint_funcs.append(cvar_fun(cvar_cubes[i], config.cvar[scenario]))

    opt.set_max_objective(obj_funcs)
    opt.add_inequality_constraint(constraint_funcs)
    opt.optimize()

    assert scenario_solution_equal_or_better(obj_funcs, opt.solution.scenario_optimal, config.solutions) or \
           regret_is_lower(opt.solution.proportions, config.proportions, opt.solution.scenario_optimal,
                           obj_funcs, config.prob.as_array())


@pytest.mark.parametrize("config", [Test1])
def test_portfolio_regret_optimizer(config, main_cubes, cvar_cubes):
    opt = PortfolioRegretOptimizer(main_cubes, cvar_cubes, config.prob.as_array(),
                                   rebalance=True, sum_to_1=True, time_unit='quarterly')
    opt.set_bounds(config.lb.as_array(), config.ub.as_array())
    opt.maximize_returns(max_cvar=config.cvar.as_array())

    obj_funcs = opt._objectives.max_returns

    assert scenario_solution_equal_or_better(obj_funcs, opt.solution.scenario_optimal, config.solutions) or \
           regret_is_lower(opt.solution.proportions, config.proportions, opt.solution.scenario_optimal,
                           obj_funcs, config.prob.as_array())


def scenario_solution_equal_or_better(obj_funcs, solutions, expected):
    results = []
    for f, w, t in zip(obj_funcs, solutions, expected):
        diff = (f(w) - f(t)) / get_option("F.SCALE")
        results.append(round(diff, 3) >= 0)
    return np.alltrue(results)


def regret_is_lower(p0, p1, solutions, obj_funcs, prob):
    def regret(p):
        f_values = np.array([obj_funcs[i](s) for i, s in enumerate(solutions)])
        cost = f_values - np.array([f(p @ solutions) for f in obj_funcs])
        cost = np.asarray(cost ** 2)
        return 100 * sum(prob * cost)

    return regret(p0) <= regret(p1)
--- Source: wangcj05/allopy / tests/regret/portfolio/test_portfolio_regret_optimizer.py (MIT, Python) ---
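For intuition, the score computed inside regret_is_lower above is a probability-weighted squared regret: per scenario, the gap between the scenario-optimal objective and the objective of the blended portfolio. A toy illustration with made-up numbers:

import numpy as np

prob = np.array([0.5, 0.3, 0.2])           # scenario probabilities
f_optimal = np.array([1.20, 0.90, 0.70])   # objective at each scenario's own optimum
f_blended = np.array([1.10, 0.85, 0.68])   # objective of the blended portfolio
cost = (f_optimal - f_blended) ** 2
print(100 * np.sum(prob * cost))           # 0.583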
# -*- coding: UTF-8 -*-
import datetime
import json

from django.contrib.auth.hashers import check_password, make_password
from django.core import serializers
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt

from cmdb.models import host, hostUser, dbGroup, dbInstance
from utils.jsonExt import DateEncoder
from utils.logUtil import getLogger
# from cmdb.models import dbCluster

logger = getLogger()


@csrf_exempt
def addChangeHostInfo(request):
    '''
    Add a new host or update an existing one.
    '''
    v_hostId = request.POST.get('host_id')
    v_businessName = request.POST.get('business_name')
    v_serviceEnv = request.POST.get('service_env')
    v_hostName = request.POST.get('host_name')
    v_intranetIpAddr = request.POST.get('intranet_ipaddr')
    v_publicIpAddr = request.POST.get('public_ipaddr')
    v_sshPort = request.POST.get('ssh_port')
    v_hostType = request.POST.get('host_type')
    v_hostRole = request.POST.get('host_role')
    v_hostDesc = request.POST.get('host_desc')

    print(v_hostId, v_businessName, v_serviceEnv, v_hostName, v_intranetIpAddr, v_publicIpAddr,
          v_sshPort, v_hostType, v_hostRole, v_hostDesc)

    if v_hostId == '' or v_hostId is None:
        # add
        try:
            hostObj = host(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName,
                           intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort,
                           hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
            hostObj.save()
            result = {'status': 1, 'msg': 'Saved successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            result = {'status': 2, 'msg': 'Save failed! ' + str(e), 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # update
        try:
            hostObj = host.objects.filter(id=v_hostId)
            hostObj.update(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName,
                           intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort,
                           hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
            # masterConfigObj.save()
            result = {'status': 1, 'msg': 'Updated successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            result = {'status': 2, 'msg': 'Update failed! ' + str(e), 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def getHostDetailInfo(request):
    hostId = request.POST['hostId']
    try:
        hostObj = host.objects.get(id=hostId)
        hostJson = hostObj.toJSON()

        result = {'status': 1, 'msg': 'Request succeeded', 'obj': hostJson}
        print(result)
        return HttpResponse(json.dumps(result), content_type='application/json')
    except Exception as e:
        print(e)
        result = {'status': 2, 'msg': 'Request failed! ' + str(e), 'data': ''}
        return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def delHost(request):
    hostId = request.POST['hostId']
    if hostId == "" or hostId is None:
        result = {'status': 3, 'msg': 'No record selected!', 'data': ''}
        return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        try:
            delResult = host.objects.filter(id=hostId).delete()
            print(delResult)
            result = {'status': 1, 'msg': 'Deleted successfully!', 'data': delResult}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            print(e)
            result = {'status': 2, 'msg': 'Delete failed! ' + str(e), 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def addChangeHostUserInfo(request):
    '''
    Add a new host user or update an existing one.
    '''
    v_hostUserId = request.POST.get('host_user_id')
    v_hostId = request.POST.get('host_id')
    v_hostUser = request.POST.get('host_user')
    v_hostPasswd = request.POST.get('host_passwd')
    v_userDesc = request.POST.get('user_desc')

    print(v_hostUserId, v_hostId, v_hostUser, v_hostPasswd, v_userDesc)

    if v_hostUserId == '' or v_hostUserId is None:
        # add
        try:
            hostObj = host.objects.get(id=v_hostId)
            hostUserObj = hostUser(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc,
                                   host=hostObj)
            hostUserObj.save()
            result = {'status': 1, 'msg': 'Saved successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status': 2, 'msg': 'Save failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # update
        try:
            hostUserObj = hostUser.objects.filter(id=v_hostUserId)
            hostUserObj.update(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc)
            # masterConfigObj.save()
            result = {'status': 1, 'msg': 'Updated successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status': 2, 'msg': 'Update failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def getHostUserDetailInfo(request):
    hostUserId = request.POST['hostUserId'].strip()
    try:
        hostUserInfo = hostUser.objects.filter(id=hostUserId)
        hostUserJson = serializers.serialize("json", hostUserInfo, use_natural_foreign_keys=True)
        result = {'status': 1, 'msg': 'Request succeeded', 'hostUserJson': hostUserJson}
        print(result)
        return HttpResponse(json.dumps(result), content_type='application/json')
    except Exception as e:
        print(e)
        result = {'status': 2, 'msg': 'Request failed! ' + str(e), 'data': ''}
        return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def delHostUser(request):
    hostUserId = request.POST['hostUserId']
    if hostUserId == "" or hostUserId is None:
        result = {'status': 3, 'msg': 'No record selected!', 'data': ''}
        return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        try:
            delResult = hostUser.objects.filter(id=hostUserId).delete()
            print(delResult)
            logger.error(delResult)
            result = {'status': 1, 'msg': 'Deleted successfully!', 'data': delResult}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            print(e)
            logger.error(e)
            result = {'status': 2, 'msg': 'Delete failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def addChangeDbGroupInfo(request):
    '''
    Add a new database group or update an existing one.
    '''
    v_groupId = request.POST.get('group_id')
    v_businessName = request.POST.get('business_name')
    v_groupName = request.POST.get('group_name')
    v_groupStatus = request.POST.get('group_status')
    v_groupDesc = request.POST.get('group_desc')
    v_groupEnv = request.POST.get('group_env')

    print(v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
    logger.info("Saving or updating database group info, received frontend params: %s" %
                str((v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)))

    if v_groupId == '' or v_groupId is None:
        # add
        try:
            dbGroupObj = dbGroup(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv,
                                 groupStatus=v_groupStatus, groupDesc=v_groupDesc)
            dbGroupObj.save()
            result = {'status': 1, 'msg': 'Saved successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status': 2, 'msg': 'Save failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # update
        try:
            dbGroupObj = dbGroup.objects.filter(id=v_groupId)
            dbGroupObj.update(businessName=v_businessName, groupName=v_groupName,
                              groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
            # masterConfigObj.save()
            result = {'status': 1, 'msg': 'Updated successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status': 2, 'msg': 'Update failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')


# @csrf_exempt
# def getDbClusterDetailInfo(request):
#     clusterId = request.POST['clusterId']
#
#     try:
#         dbClusterObj = dbCluster.objects.get(id=clusterId)
#         dbClusterJson = dbClusterObj.toJSON()
#
#         result = {'status': 1, 'msg': 'Request succeeded', 'obj': dbClusterJson}
#         print(result)
#         return HttpResponse(json.dumps(result), content_type='application/json')
#     except Exception as e:
#         print(e)
#         result = {'status': 2, 'msg': 'Request failed! ' + str(e), 'data': ''}
#         return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def getDbGroupDetailInfo(request):
    groupId = request.POST['groupId']
    try:
        dbGroupObj = dbGroup.objects.get(id=groupId)
        dbGroupJson = dbGroupObj.toJSON()

        result = {'status': 1, 'msg': 'Request succeeded', 'obj': dbGroupJson}
        print(result)
        return HttpResponse(json.dumps(result), content_type='application/json')
    except Exception as e:
        print(e)
        result = {'status': 2, 'msg': 'Request failed! ' + str(e), 'data': ''}
        return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def addChangeDbInstanceInfo(request):
    '''
    Add a new database instance or update an existing one.
    '''
    v_instanceId = request.POST.get('instance_id')
    v_groupId = request.POST.get('group_id')
    v_host_id = request.POST.get('host_id')
    v_instanceName = request.POST.get('instance_env')
    v_instanceType = request.POST.get('instance_type')
    v_portNum = request.POST.get('port_num')
    v_instanceRole = request.POST.get('instance_role')
    v_instanceStatus = request.POST.get('instance_status')
    v_instanceDesc = request.POST.get('instance_desc')

    print(v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum,
          v_instanceRole, v_instanceStatus, v_instanceDesc)
    logger.info("Saving or updating database instance info, received frontend params: %s" %
                str((v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum,
                     v_instanceRole, v_instanceStatus, v_instanceDesc)))

    if v_instanceId == '' or v_instanceId is None:
        # add
        try:
            dbGroupObj = dbGroup.objects.get(id=v_groupId)
            hostObj = host.objects.get(id=v_host_id)
            print(hostObj)
            dbInstanceObj = dbInstance(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName,
                                       instanceType=v_instanceType, portNum=v_portNum,
                                       instanceRole=v_instanceRole, instanceStatus=v_instanceStatus,
                                       instanceDesc=v_instanceDesc)
            dbInstanceObj.save()
            result = {'status': 1, 'msg': 'Saved successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            print(e)
            logger.error(str(e))
            result = {'status': 2, 'msg': 'Save failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # update
        try:
            dbGroupObj = dbGroup.objects.get(id=v_groupId)
            hostObj = host.objects.get(id=v_host_id)
            dbInstanceObj = dbInstance.objects.filter(id=v_instanceId)
            dbInstanceObj.update(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName,
                                 instanceType=v_instanceType, portNum=v_portNum,
                                 instanceRole=v_instanceRole, instanceStatus=v_instanceStatus,
                                 instanceDesc=v_instanceDesc)
            # masterConfigObj.save()
            result = {'status': 1, 'msg': 'Updated successfully!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status': 2, 'msg': 'Update failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def getDbInstanceDetailInfo(request):
    instanceId = request.POST['instanceId'].strip()
    try:
        dbInstanceInfo = dbInstance.objects.filter(id=instanceId)
        dbInstanceJson = serializers.serialize("json", dbInstanceInfo, use_natural_foreign_keys=True)
        result = {'status': 1, 'msg': 'Request succeeded', 'dbInstanceJson': dbInstanceJson}
        print(result)
        return HttpResponse(json.dumps(result), content_type='application/json')
    except Exception as e:
        print(e)
        result = {'status': 2, 'msg': 'Request failed! ' + str(e), 'data': ''}
        return HttpResponse(json.dumps(result), content_type='application/json')

    # conn = connection.cursor()
    # try:
    #     conn.execute('SELECT cdi.*, ch.host_name, ch.intranet_ip_addr, cdg.group_name '
    #                  'FROM cmdb_db_instance cdi inner join cmdb_host ch on cdi.host = ch.id '
    #                  'inner join cmdb_db_group cdg on cdi.db_group = cdg.id '
    #                  'WHERE cdi.id = %s', [instanceId])
    #     dbInstanceInfo = conn.fetchall()
    #     print(dbInstanceInfo)
    #     dbInstanceJson = serializers.serialize("json", dbInstanceInfo)
    #     result = {'status': 1, 'msg': 'Request succeeded', 'dbInstanceInfo': dbInstanceInfo}
    #     print(result)
    #     return HttpResponse(json.dumps(result, cls=DateEncoder), content_type='application/json')
    # except Exception as e:
    #     print(e)
    #     result = {'status': 2, 'msg': 'Request failed! ' + str(e), 'data': ''}
    #     return HttpResponse(json.dumps(result), content_type='application/json')
    # finally:
    #     conn.close()

    # try:
    #     dbInstanceInfo = dbInstance.objects.raw('SELECT * FROM cmdb_db_instance WHERE id = %d', [instanceId])
    #     dbInstanceJson = serializers.serialize("json", dbInstanceInfo)
    #
    #     print(dbInstanceJson[0].fields.host)
    #     print(type(dbInstanceJson[0].fields.host))
    #
    #     hostInfo = host.objects.raw('SELECT * FROM cmdb_host WHERE id = %d', [int(dbInstanceJson[0].fields.host)])
    #     hostJson = serializers.serialize("json", hostInfo)
    #     print(hostJson)
    #
    #     result = {'status': 1, 'msg': 'Request succeeded', 'dbInstanceJson': dbInstanceJson}
    #     print(result)
    #     return HttpResponse(json.dumps(result), content_type='application/json')
    # except Exception as e:
    #     print(e)
    #     result = {'status': 2, 'msg': 'Request failed! ' + str(e), 'data': ''}
    #     return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def delDbInstance(request):
    instanceId = request.POST['instanceId']
    if instanceId == "" or instanceId is None:
        result = {'status': 3, 'msg': 'No record selected!', 'data': ''}
        return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        try:
            delResult = dbInstance.objects.filter(id=instanceId).delete()
            print(delResult)
            logger.error(delResult)
            result = {'status': 1, 'msg': 'Deleted successfully!', 'data': delResult}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            print(e)
            logger.error(e)
            result = {'status': 2, 'msg': 'Delete failed!', 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')


# @csrf_exempt
# def addChangeDbClusterInfo(request):
#     '''
#     Add a new cluster or update an existing one.
#     '''
#     v_clusterId = request.POST.get('cluster_id')
#     v_clusterName = request.POST.get('cluster_name')
#     v_clusterStatus = request.POST.get('cluster_status')
#     v_clusterDesc = request.POST.get('cluster_desc')
#
#     print("begin add Cluster: ", v_clusterId, v_clusterName, v_clusterStatus, v_clusterDesc)
#
#     if v_clusterId == '' or v_clusterId is None:
#         # add
#         try:
#             dbClusterObj = dbCluster(clusterName=v_clusterName, clusterStatus=v_clusterStatus,
#                                      clusterDesc=v_clusterDesc)
#             dbClusterObj.save()
#             result = {'status': 1, 'msg': 'Saved successfully!', 'data': ''}
#             return HttpResponse(json.dumps(result), content_type='application/json')
#         except Exception as e:
#             logger.error(str(e))
#             result = {'status': 2, 'msg': 'Save failed!', 'data': ''}
#             return HttpResponse(json.dumps(result), content_type='application/json')
#     else:
#         # update
#         try:
#             dbClusterObj = dbCluster.objects.filter(id=v_clusterId)
#             dbClusterObj.update(clusterName=v_clusterName, clusterStatus=v_clusterStatus,
#                                 clusterDesc=v_clusterDesc)
#             # masterConfigObj.save()
#             result = {'status': 1, 'msg': 'Updated successfully!', 'data': ''}
#             return HttpResponse(json.dumps(result), content_type='application/json')
#         except Exception as e:
#             logger.error(str(e))
#             result = {'status': 2, 'msg': 'Update failed!', 'data': ''}
#             return HttpResponse(json.dumps(result), content_type='application/json')
#
#
# @csrf_exempt
# def delDbCluster(request):
#     v_clusterId = request.POST['cluster_id']
#
#     if v_clusterId == "" or v_clusterId is None:
#         result = {'status': 3, 'msg': 'No record selected!', 'data': ''}
#         return HttpResponse(json.dumps(result), content_type='application/json')
#     else:
#         try:
#             delResult = dbCluster.objects.filter(id=v_clusterId).delete()
#             print(delResult)
#             logger.info(delResult)
#             result = {'status': 1, 'msg': 'Deleted successfully!', 'data': delResult}
#             return HttpResponse(json.dumps(result), content_type='application/json')
#         except Exception as e:
#             print(e)
#             logger.error(e)
#             result = {'status': 2, 'msg': 'Delete failed!', 'data': ''}
#             return HttpResponse(json.dumps(result), content_type='application/json')
--- Source: bopopescu/dbsupport / cmdb/views_ajax.py (Apache-2.0, Python) ---
###          ###
#   Imports    #
###          ###

import datetime, os, plistlib, struct, sys, itertools
from io import BytesIO

if sys.version_info < (3, 0):
    # Force use of StringIO instead of cStringIO as the latter
    # has issues with Unicode strings
    from StringIO import StringIO

try:
    FMT_XML = plistlib.FMT_XML
    FMT_BINARY = plistlib.FMT_BINARY
except:
    FMT_XML = "FMT_XML"
    FMT_BINARY = "FMT_BINARY"


###                 ###
#   Helper Methods    #
###                 ###

def _check_py3():
    return True if sys.version_info >= (3, 0) else False


def _is_binary(fp):
    if isinstance(fp, _get_inst()):
        return fp.startswith(b"bplist00")
    header = fp.read(32)
    fp.seek(0)
    return header[:8] == b'bplist00'


def _get_inst():
    if _check_py3():
        return (str)
    else:
        return (str, unicode)


###                                 ###
#   Deprecated Functions - Remapped   #
###                                 ###

def readPlist(pathOrFile):
    if not isinstance(pathOrFile, _get_inst()):
        return load(pathOrFile)
    with open(pathOrFile, "rb") as f:
        return load(f)


def writePlist(value, pathOrFile):
    if not isinstance(pathOrFile, _get_inst()):
        return dump(value, pathOrFile, fmt=FMT_XML, sort_keys=True, skipkeys=False)
    with open(pathOrFile, "wb") as f:
        return dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False)


###                    ###
#   Remapped Functions   #
###                    ###

def load(fp, fmt=None, use_builtin_types=None, dict_type=dict):
    if _check_py3():
        use_builtin_types = True if use_builtin_types == None else use_builtin_types
        # We need to monkey patch this to allow for hex integers - code taken/modified from
        # https://github.com/python/cpython/blob/3.8/Lib/plistlib.py
        if fmt is None:
            header = fp.read(32)
            fp.seek(0)
            for info in plistlib._FORMATS.values():
                if info['detect'](header):
                    P = info['parser']
                    break
            else:
                raise plistlib.InvalidFileException()
        else:
            P = plistlib._FORMATS[fmt]['parser']
        p = P(use_builtin_types=use_builtin_types, dict_type=dict_type)
        if isinstance(p, plistlib._PlistParser):
            # Monkey patch!
            def end_integer():
                d = p.get_data()
                p.add_object(int(d, 16) if d.lower().startswith("0x") else int(d))
            p.end_integer = end_integer
        return p.parse(fp)
    elif not _is_binary(fp):
        # Is not binary - assume a string - and try to load
        # We avoid using readPlistFromString() as that uses
        # cStringIO and fails when Unicode strings are detected
        # Don't subclass - keep the parser local
        from xml.parsers.expat import ParserCreate
        # Create a new PlistParser object - then we need to set up
        # the values and parse.
        p = plistlib.PlistParser()
        # We also need to monkey patch this to allow for other dict_types
        def begin_dict(attrs):
            d = dict_type()
            p.addObject(d)
            p.stack.append(d)
        def end_integer():
            d = p.getData()
            p.addObject(int(d, 16) if d.lower().startswith("0x") else int(d))
        p.begin_dict = begin_dict
        p.end_integer = end_integer
        parser = ParserCreate()
        parser.StartElementHandler = p.handleBeginElement
        parser.EndElementHandler = p.handleEndElement
        parser.CharacterDataHandler = p.handleData
        if isinstance(fp, unicode):
            # Encode unicode -> string; use utf-8 for safety
            fp = fp.encode("utf-8")
        if isinstance(fp, _get_inst()):
            # It's a string - let's wrap it up
            fp = StringIO(fp)
        # Parse it
        parser.ParseFile(fp)
        return p.root
    else:
        use_builtin_types = False if use_builtin_types == None else use_builtin_types
        p = _BinaryPlistParser(use_builtin_types=use_builtin_types, dict_type=dict_type)
        return p.parse(fp)


def loads(value, fmt=None, use_builtin_types=None, dict_type=dict):
    if _check_py3() and isinstance(value, _get_inst()):
        # If it's a string - encode it
        value = value.encode()
    return load(BytesIO(value), fmt=fmt, use_builtin_types=use_builtin_types, dict_type=dict_type)


def dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False):
    if _check_py3():
        plistlib.dump(value, fp, fmt=fmt, sort_keys=sort_keys, skipkeys=skipkeys)
    else:
        if fmt == FMT_XML:
            # We need to monkey patch a bunch here too in order to avoid auto-sorting
            # of keys
            writer = plistlib.PlistWriter(fp)
            def writeDict(d):
                if d:
                    writer.beginElement("dict")
                    items = sorted(d.items()) if sort_keys else d.items()
                    for key, value in items:
                        if not isinstance(key, (str, unicode)):
                            if skipkeys:
                                continue
                            raise TypeError("keys must be strings")
                        writer.simpleElement("key", key)
                        writer.writeValue(value)
                    writer.endElement("dict")
                else:
                    writer.simpleElement("dict")
            writer.writeDict = writeDict
            writer.writeln("<plist version=\"1.0\">")
            writer.writeValue(value)
            writer.writeln("</plist>")
        elif fmt == FMT_BINARY:
            # Assume binary at this point
            writer = _BinaryPlistWriter(fp, sort_keys=sort_keys, skipkeys=skipkeys)
            writer.write(value)
        else:
            # Not a proper format
            raise ValueError("Unsupported format: {}".format(fmt))


def dumps(value, fmt=FMT_XML, skipkeys=False, sort_keys=True):
    if _check_py3():
        encoded = plistlib.dumps(value, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys)
        # Binary plists are not valid UTF-8 - only decode XML output to str
        return encoded.decode("utf-8") if fmt == FMT_XML else encoded
    else:
        # We avoid using writePlistToString() as that uses
        # cStringIO and fails when Unicode strings are detected
        f = StringIO()
        dump(value, f, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys)
        return f.getvalue()


###                              ###
#   Binary Plist Stuff For Py2     #
###                              ###

# From the python 3 plistlib.py source: https://github.com/python/cpython/blob/3.7/Lib/plistlib.py
# Tweaked to function on Python 2

class InvalidFileException(ValueError):
    def __init__(self, message="Invalid file"):
        ValueError.__init__(self, message)

_BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}

_undefined = object()


class _BinaryPlistParser:
    """
    Read or write a binary plist file, following the description of the binary
    format.  Raise InvalidFileException in case of error, otherwise return the
    root object.

    see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c
    """
    def __init__(self, use_builtin_types, dict_type):
        self._use_builtin_types = use_builtin_types
        self._dict_type = dict_type

    def parse(self, fp):
        try:
            # The basic file format:
            # HEADER
            # object...
            # refid->offset...
            # TRAILER
            self._fp = fp
            self._fp.seek(-32, os.SEEK_END)
            trailer = self._fp.read(32)
            if len(trailer) != 32:
                raise InvalidFileException()
            (
                offset_size, self._ref_size, num_objects, top_object,
                offset_table_offset
            ) = struct.unpack('>6xBBQQQ', trailer)
            self._fp.seek(offset_table_offset)
            self._object_offsets = self._read_ints(num_objects, offset_size)
            self._objects = [_undefined] * num_objects
            return self._read_object(top_object)

        except (OSError, IndexError, struct.error, OverflowError,
                UnicodeDecodeError):
            raise InvalidFileException()

    def _get_size(self, tokenL):
        """ return the size of the next object."""
        if tokenL == 0xF:
            m = ord(self._fp.read(1)[0]) & 0x3
            s = 1 << m
            f = '>' + _BINARY_FORMAT[s]
            return struct.unpack(f, self._fp.read(s))[0]

        return tokenL

    def _read_ints(self, n, size):
        data = self._fp.read(size * n)
        if size in _BINARY_FORMAT:
            return struct.unpack('>' + _BINARY_FORMAT[size] * n, data)
        else:
            if not size or len(data) != size * n:
                raise InvalidFileException()
            return tuple(int.from_bytes(data[i: i + size], 'big')
                         for i in range(0, size * n, size))

    def _read_refs(self, n):
        return self._read_ints(n, self._ref_size)

    def _read_object(self, ref):
        """
        read the object by reference.

        May recursively read sub-objects (content of an array/dict/set)
        """
        result = self._objects[ref]
        if result is not _undefined:
            return result

        offset = self._object_offsets[ref]
        self._fp.seek(offset)
        token = ord(self._fp.read(1)[0])
        tokenH, tokenL = token & 0xF0, token & 0x0F

        if token == 0x00:
            result = None

        elif token == 0x08:
            result = False

        elif token == 0x09:
            result = True

        # The referenced source code also mentions URL (0x0c, 0x0d) and
        # UUID (0x0e), but neither can be generated using the Cocoa libraries.

        elif token == 0x0f:
            result = b''

        elif tokenH == 0x10:  # int
            result = 0
            for k in xrange(1 << tokenL):
                result = (result << 8) + ord(self._fp.read(1))
            # result = int.from_bytes(self._fp.read(1 << tokenL),
            #                         'big', signed=tokenL >= 3)

        elif token == 0x22:  # real
            result = struct.unpack('>f', self._fp.read(4))[0]

        elif token == 0x23:  # real
            result = struct.unpack('>d', self._fp.read(8))[0]

        elif token == 0x33:  # date
            f = struct.unpack('>d', self._fp.read(8))[0]
            # timestamp 0 of binary plists corresponds to 1/1/2001
            # (year of Mac OS X 10.0), instead of 1/1/1970.
            result = (datetime.datetime(2001, 1, 1) +
                      datetime.timedelta(seconds=f))

        elif tokenH == 0x40:  # data
            s = self._get_size(tokenL)
            if self._use_builtin_types:
                result = self._fp.read(s)
            else:
                result = plistlib.Data(self._fp.read(s))

        elif tokenH == 0x50:  # ascii string
            s = self._get_size(tokenL)
            result = self._fp.read(s).decode('ascii')

        elif tokenH == 0x60:  # unicode string
            s = self._get_size(tokenL)
            result = self._fp.read(s * 2).decode('utf-16be')

        # tokenH == 0x80 is documented as 'UID' and appears to be used for
        # keyed-archiving, not in plists.

        elif tokenH == 0xA0:  # array
            s = self._get_size(tokenL)
            obj_refs = self._read_refs(s)
            result = []
            self._objects[ref] = result
            result.extend(self._read_object(x) for x in obj_refs)

        # tokenH == 0xB0 is documented as 'ordset', but is not actually
        # implemented in the Apple reference code.

        # tokenH == 0xC0 is documented as 'set', but sets cannot be used in
        # plists.

        elif tokenH == 0xD0:  # dict
            s = self._get_size(tokenL)
            key_refs = self._read_refs(s)
            obj_refs = self._read_refs(s)
            result = self._dict_type()
            self._objects[ref] = result
            for k, o in zip(key_refs, obj_refs):
                key = self._read_object(k)
                if isinstance(key, plistlib.Data):
                    key = key.data
                result[key] = self._read_object(o)

        else:
            raise InvalidFileException()

        self._objects[ref] = result
        return result


def _count_to_size(count):
    if count < 1 << 8:
        return 1
    elif count < 1 << 16:
        return 2
    elif count < 1 << 32:
        return 4
    else:
        return 8


_scalars = (str, int, float, datetime.datetime, bytes)


class _BinaryPlistWriter(object):
    def __init__(self, fp, sort_keys, skipkeys):
        self._fp = fp
        self._sort_keys = sort_keys
        self._skipkeys = skipkeys

    def write(self, value):
        # Flattened object list:
        self._objlist = []

        # Mappings from object->objectid
        # First dict has (type(object), object) as the key,
        # second dict is used when object is not hashable and
        # has id(object) as the key.
        self._objtable = {}
        self._objidtable = {}

        # Create list of all objects in the plist
        self._flatten(value)

        # Size of object references in serialized containers
        # depends on the number of objects in the plist.
        num_objects = len(self._objlist)
        self._object_offsets = [0] * num_objects
        self._ref_size = _count_to_size(num_objects)

        self._ref_format = _BINARY_FORMAT[self._ref_size]

        # Write file header
        self._fp.write(b'bplist00')

        # Write object list
        for obj in self._objlist:
            self._write_object(obj)

        # Write refnum->object offset table
        top_object = self._getrefnum(value)
        offset_table_offset = self._fp.tell()
        offset_size = _count_to_size(offset_table_offset)
        offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects
        self._fp.write(struct.pack(offset_format, *self._object_offsets))

        # Write trailer
        sort_version = 0
        trailer = (
            sort_version, offset_size, self._ref_size, num_objects,
            top_object, offset_table_offset
        )
        self._fp.write(struct.pack('>5xBBBQQQ', *trailer))

    def _flatten(self, value):
        # First check if the object is in the object table, not used for
        # containers to ensure that two subcontainers with the same contents
        # will be serialized as distinct values.
        if isinstance(value, _scalars):
            if (type(value), value) in self._objtable:
                return

        elif isinstance(value, plistlib.Data):
            if (type(value.data), value.data) in self._objtable:
                return

        elif id(value) in self._objidtable:
            return

        # Add to objectreference map
        refnum = len(self._objlist)
        self._objlist.append(value)
        if isinstance(value, _scalars):
            self._objtable[(type(value), value)] = refnum
        elif isinstance(value, plistlib.Data):
            self._objtable[(type(value.data), value.data)] = refnum
        else:
            self._objidtable[id(value)] = refnum

        # And finally recurse into containers
        if isinstance(value, dict):
            keys = []
            values = []
            items = value.items()
            if self._sort_keys:
                items = sorted(items)

            for k, v in items:
                if not isinstance(k, (str, unicode)):
                    if self._skipkeys:
                        continue
                    raise TypeError("keys must be strings")
                keys.append(k)
                values.append(v)

            for o in itertools.chain(keys, values):
                self._flatten(o)

        elif isinstance(value, (list, tuple)):
            for o in value:
                self._flatten(o)

    def _getrefnum(self, value):
        if isinstance(value, _scalars):
            return self._objtable[(type(value), value)]
        elif isinstance(value, plistlib.Data):
            return self._objtable[(type(value.data), value.data)]
        else:
            return self._objidtable[id(value)]

    def _write_size(self, token, size):
        if size < 15:
            self._fp.write(struct.pack('>B', token | size))
        elif size < 1 << 8:
            self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size))
        elif size < 1 << 16:
            self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size))
        elif size < 1 << 32:
            self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size))
        else:
            self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size))

    def _write_object(self, value):
        ref = self._getrefnum(value)
        self._object_offsets[ref] = self._fp.tell()
        if value is None:
            self._fp.write(b'\x00')

        elif value is False:
            self._fp.write(b'\x08')

        elif value is True:
            self._fp.write(b'\x09')

        elif isinstance(value, int):
            if value < 0:
                try:
                    self._fp.write(struct.pack('>Bq', 0x13, value))
                except struct.error:
                    raise OverflowError(value)  # from None
            elif value < 1 << 8:
                self._fp.write(struct.pack('>BB', 0x10, value))
            elif value < 1 << 16:
                self._fp.write(struct.pack('>BH', 0x11, value))
            elif value < 1 << 32:
                self._fp.write(struct.pack('>BL', 0x12, value))
            elif value < 1 << 63:
                self._fp.write(struct.pack('>BQ', 0x13, value))
            elif value < 1 << 64:
                self._fp.write(b'\x14' + value.to_bytes(16, 'big', signed=True))
            else:
                raise OverflowError(value)

        elif isinstance(value, float):
            self._fp.write(struct.pack('>Bd', 0x23, value))

        elif isinstance(value, datetime.datetime):
            f = (value - datetime.datetime(2001, 1, 1)).total_seconds()
            self._fp.write(struct.pack('>Bd', 0x33, f))

        elif isinstance(value, plistlib.Data):
            self._write_size(0x40, len(value.data))
            self._fp.write(value.data)

        elif isinstance(value, (str, unicode)):
            try:
                t = value.encode('ascii')
                self._write_size(0x50, len(value))
            except UnicodeEncodeError:
                t = value.encode('utf-16be')
                self._write_size(0x60, len(t) // 2)

            self._fp.write(t)

        elif isinstance(value, (bytes, bytearray)):
            self._write_size(0x40, len(value))
            self._fp.write(value)

        elif isinstance(value, (list, tuple)):
            refs = [self._getrefnum(o) for o in value]
            s = len(refs)
            self._write_size(0xA0, s)
            self._fp.write(struct.pack('>' + self._ref_format * s, *refs))

        elif isinstance(value, dict):
            keyRefs, valRefs = [], []

            if self._sort_keys:
                rootItems = sorted(value.items())
            else:
                rootItems = value.items()

            for k, v in rootItems:
                if not isinstance(k, (str, unicode)):
                    if self._skipkeys:
                        continue
                    raise TypeError("keys must be strings")
                keyRefs.append(self._getrefnum(k))
                valRefs.append(self._getrefnum(v))

            s = len(keyRefs)
            self._write_size(0xD0, s)
            self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs))
            self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs))

        else:
            raise TypeError(value)
--- Source: 640921008/gibMacOS / Scripts/plist.py (MIT, Python) ---
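A round-trip sketch for the module above, assuming a Python version whose plistlib internals match what load() patches (roughly 3.4-3.8 on the py3 path, or Python 2): write a dict as a binary plist to an in-memory buffer, then parse it back:

from io import BytesIO

sample = {"Name": "example", "Count": 3, "Nested": {"Flag": True}}
buf = BytesIO()
dump(sample, buf, fmt=FMT_BINARY)
buf.seek(0)
assert load(buf) == sample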
""" Utils for AiiDA. ---------------- Utilities for making working against AiiDA a bit easier. Mostly here due to historical reasons when AiiDA was rapidly developed. In the future most routines that have now standardized in AiiDA will be removed. """ # pylint: disable=import-outside-toplevel import numpy as np from packaging import version from aiida.orm import User from aiida.cmdline.utils.decorators import with_dbenv BASIC_DATA_TYPES = ['bool', 'float', 'int', 'list', 'str', 'dict'] def get_data_node(data_type, *args, **kwargs): return get_data_class(data_type)(*args, **kwargs) def querybuild(cls, **kwargs): """ Instantiates and returns a QueryBuilder instance. The QueryBuilder's path has one vertice so far, namely this class. Additional parameters (e.g. filters or a label), can be passes as keyword arguments. :param label: Label to give :param filters: filters to apply :param project: projections :returns: a QueryBuilder instance. """ from aiida.orm import QueryBuilder query_builder = QueryBuilder() filters = kwargs.pop('filters', {}) query_builder.append(cls, filters=filters, **kwargs) return query_builder @with_dbenv() def get_data_class(data_type): """Provide access to the orm.data classes with deferred dbenv loading.""" from aiida.plugins import DataFactory from aiida.common.exceptions import MissingEntryPointError data_cls = None try: data_cls = DataFactory(data_type) except MissingEntryPointError as err: raise err return data_cls def get_current_user(): """Get current user.""" current_user = User.objects.get_default() return current_user def copy_parameter(old_parameter): """Assemble a new Dict.""" return get_data_node('dict', dict=old_parameter.get_dict()) def displaced_structure(structure, displacement, entry): disp_structure = structure.clone() displace_position(disp_structure, displacement, entry) return disp_structure def compressed_structure(structure, volume_change): comp_structure = structure.clone() compress_cell(comp_structure, volume_change) return comp_structure def displace_position(structure, displacement, entry): """Displace a position in the StructureData.""" sites = structure.sites positions = [] for site in sites: positions.append(site.position) new_position = np.asarray(positions[entry - 1]) + displacement new_position = new_position.tolist() positions[entry - 1] = tuple(new_position) structure.reset_sites_positions(positions) def compress_cell(structure, volume_change): """Apply compression or tensile forces to the unit cell.""" cell = structure.cell new_cell = np.array(cell) * volume_change structure.reset_cell(new_cell.tolist()) def aiida_version(): from aiida import __version__ as aiida_version_ return version.parse(aiida_version_) def cmp_version(string): return version.parse(string) def cmp_load_verdi_data(): """Load the verdi data click command group for any version since 0.11.""" verdi_data = None import_errors = [] try: from aiida.cmdline.commands import data_cmd as verdi_data except ImportError as err: import_errors.append(err) if not verdi_data: try: from aiida.cmdline.commands import verdi_data except ImportError as err: import_errors.append(err) if not verdi_data: try: from aiida.cmdline.commands.cmd_data import verdi_data except ImportError as err: import_errors.append(err) if not verdi_data: err_messages = '\n'.join([' * {}'.format(err) for err in import_errors]) raise ImportError('The verdi data base command group could not be found:\n' + err_messages) return verdi_data def create_authinfo(computer, store=False): """Allow the current user to use 
the given computer.""" from aiida.orm import AuthInfo authinfo = AuthInfo(computer=computer, user=get_current_user()) if store: authinfo.store() return authinfo def cmp_get_authinfo(computer): """Get an existing authinfo or None for the given computer and current user.""" return computer.get_authinfo(get_current_user()) def cmp_get_transport(computer): if hasattr(computer, 'get_transport'): return computer.get_transport() authinfo = cmp_get_authinfo(computer) return authinfo.get_transport()
28.25
99
0.711504
[ "MIT" ]
kavanase/aiida-vasp
aiida_vasp/utils/aiida_utils.py
4,520
Python
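A minimal usage sketch for the helpers above. This is hypothetical: it assumes a configured AiiDA profile and that the module is importable under the path listed for this record; the `'dict'` entry-point name matches the era of this code (AiiDA 1.x).

```python
# Hypothetical usage of get_data_node/get_data_class/querybuild from above;
# assumes an AiiDA profile is configured and the 'dict' entry point resolves.
from aiida import load_profile
from aiida_vasp.utils.aiida_utils import get_data_class, get_data_node, querybuild

load_profile()

# Wrap a plain dict in a storable Dict node via its entry-point name.
parameters = get_data_node('dict', dict={'encut': 500, 'ismear': 0})

# One-vertex query over all Dict nodes, as described in the querybuild docstring.
qb = querybuild(get_data_class('dict'))
print(qb.count())
```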
# Fully Written by @HeisenbergTheDanger (Keep credits else gay) # Permission Seeked By @StarkXD - Approved import asyncio import datetime from telethon import events from var import Var from uniborg.util import admin_cmd from telethon.tl.types import ( DocumentAttributeFilename, DocumentAttributeSticker, InputMediaUploadedDocument, InputMediaUploadedPhoto, InputPeerNotifySettings, InputStickerSetID, InputStickerSetShortName, MessageMediaPhoto ) from userbot.plugins.sql_helper.broadcast_sql import in_channels, add_channel, rm_channel, get_all_channels logs_id = Var.PRIVATE_GROUP_ID @borg.on(admin_cmd("bforward ?(.*)", allow_sudo=True)) async def forw(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a message to broadcast.") return channels = get_all_channels() await event.edit("Sending...") error_count = 0 sent_count = 0 if event.reply_to_msg_id: previous_message = await event.get_reply_message() message = previous_message.message raw_text = previous_message.raw_text error_count = 0 for channel in channels: try: await borg.forward_messages(int(channel.chat_id), previous_message) sent_count += 1 await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}") except Exception as error: try: await borg.send_message(logs_id, f"Error in sending at {channel.chat_id}.") await borg.send_message(logs_id, "Error! " + str(error)) if error == "The message cannot be empty unless a file is provided": event.edit("For sending files, upload in Saved Messages and reply .forward to in.") return except: pass error_count+=1 await event.edit(f"Sent : {sent_count}\nError : {error_count}") await event.edit(f"{sent_count} messages sent with {error_count} errors.") if error_count > 0: try: await borg.send_message(logs_id, f"{error_count} Errors") except: await event.edit("Set up log channel for checking errors.") @borg.on(admin_cmd("broadcast ?(.*)", allow_sudo=True)) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a message to broadcast.") return channels = get_all_channels() error_count = 0 sent_count = 0 await event.edit("Sending....") if event.reply_to_msg_id: previous_message = await event.get_reply_message() if previous_message.sticker or previous_message.poll: await event.edit("Reply .forward for stickers and polls.") return if previous_message.gif or previous_message.audio or previous_message.voice or previous_message.video or previous_message.video_note or previous_message.contact or previous_message.game or previous_message.geo or previous_message.invoice: # Written by @HeisenbergTheDanger await event.edit("Not supported. Try .forward") return if not previous_message.web_preview and previous_message.photo: file = await borg.download_file(previous_message.media) uploaded_doc = await borg.upload_file(file, file_name="img.png") raw_text = previous_message.text for channel in channels: try: if previous_message.photo: await borg.send_file( int(channel.chat_id), InputMediaUploadedPhoto( file=uploaded_doc ), force_document=False, caption = raw_text, link_preview = False ) sent_count += 1 await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}") except Exception as error: try: await borg.send_message(logs_id, f"Error in sending at {chat_id}.") await borg.send_message(logs_id, "Error! 
" + str(error)) if error == "The message cannot be empty unless a file is provided": event.edit("For sending files, upload in Saved Messages and reply .forward to in.") return except: pass error_count += 1 await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}") await event.edit(f"{sent_count} messages sent with {error_count} errors.") if error_count > 0: try: await borg.send_message(logs_id, f"{error_count} Errors") except: pass else: raw_text = previous_message.text for channel in channels: try: await borg.send_message(int(channel.chat_id), raw_text, link_preview = False) sent_count += 1 await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}") except Exception as error: try: await borg.send_message(logs_id, f"Error in sending at {channel.chat_id}.") await borg.send_message(logs_id, "Error! " + str(error)) if error == "The message cannot be empty unless a file is provided": event.edit("For sending files, upload in Saved Messages and reply .forward to in.") return except: pass error_count+=1 await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}") await event.edit(f"{sent_count} messages sent with {error_count} errors.") if error_count > 0: try: await borg.send_message(logs_id, f"{error_count} Errors") except: await event.edit("Set up log channel for checking errors.") # Written by @HeisenbergTheDanger @borg.on(admin_cmd("badd ?(.*)", allow_sudo=True)) async def add_ch(event): if event.fwd_from: return if event.reply_to_msg_id: await event.edit("Adding...") previous_message = await event.get_reply_message() raw_text = previous_message.text lines = raw_text.split("\n") length = len(lines) for line_number in range(1, length - 2): channel_id = lines[line_number][4:-1] if not in_channels(channel_id): add_channel(channel_id) await event.edit("Channels added!") await asyncio.sleep(3) await event.delete() return chat_id = event.chat_id try: if int(chat_id) == logs_id: return except: pass if not in_channels(chat_id): add_channel(chat_id) await event.edit("`Added Successfuly To List`") await asyncio.sleep(3) await event.delete() elif in_channels(chat_id): await event.edit("`Channel is already is database!`") await asyncio.sleep(3) await event.delete() @borg.on(admin_cmd("brm ?(.*)", allow_sudo=True)) async def remove_ch(event): if event.fwd_from: return chat_id = event.pattern_match.group(1) if chat_id == "all": await event.edit("Removing...") channels = get_all_channels() for channel in channels: rm_channel(channel.chat_id) await event.edit("Database cleared.") return if in_channels(chat_id): rm_channel(chat_id) await event.edit("Removed from database") await asyncio.sleep(3) await event.delete() elif in_channels(event.chat_id): rm_channel(event.chat_id) await event.edit("Removed from database") await asyncio.sleep(3) await event.delete() elif not in_channels(event.chat_id): await event.edit("Channel is already removed from database. ") await asyncio.sleep(3) await event.delete() @borg.on(admin_cmd("listchannels", allow_sudo=True)) async def list(event): if event.fwd_from: return channels = get_all_channels() msg = "Channels in database:\n" for channel in channels: msg += f"=> `{channel.chat_id}`\n" msg += f"\nTotal {len(channels)} channels." 
    # Telegram caps a single message at 4096 characters; beyond that,
    # send the channel list as a file instead.
    if len(msg) > 4096:
        import io
        with io.BytesIO(str.encode(msg)) as out_file:
            out_file.name = "channels.txt"
            await borg.send_file(
                event.chat_id,
                out_file,
                force_document=True,
                allow_cache=False,
                caption="Channels in database",
                reply_to=event
            )
        await event.delete()
    else:
        await event.edit(msg)
36.893617
280
0.624452
[ "MIT" ]
doctorhacker007/FridayUserbot
userbot/plugins/broadcast.py
8,670
Python
import pytest import sinergym.utils.rewards as R @pytest.mark.parametrize( 'power,temperatures,month,day,reward,reward_energy,reward_comfort', [ # Input 1 ( 186.5929171535975, [22.16742570092868], 3, 31, -0.009329645857679876, -0.018659291715359752, -0.0 ), # Input 2 ( 688.0477550424935, [26.7881162590194], 3, 30, -1.6784605172618248, -0.06880477550424935, -3.2881162590194 ), # Input 3 ( 23168.30752221127, [20.37505026953311], 2, 25, -1.1584153761105636, -2.316830752221127, -0.0 ), ] ) def test_calculate( simple_reward, power, temperatures, month, day, reward, reward_energy, reward_comfort): result = simple_reward.calculate(power, temperatures, month, day) assert result[0] == reward assert result[1]['reward_energy'] == reward_energy assert result[1]['reward_comfort'] == reward_comfort
22.5
71
0.498765
[ "MIT" ]
AlejandroCN7/sinergym
tests/test_reward.py
1,215
Python
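The expected values in these fixtures are consistent with a linear reward: a 50/50 mix of an energy penalty (1e-4 times power) and a comfort penalty (degrees outside a comfort band of roughly [20.0, 23.5] degrees C for these winter/spring dates). The sketch below is a sanity check of that inferred rule, not the sinergym implementation; the weights, energy scale, and comfort band are assumptions read off the numbers above.

```python
# Sanity check of the rule implied by the fixtures above; the 0.5 weighting,
# the 1e-4 energy scale and the [20.0, 23.5] comfort band are inferred from
# the expected values, not taken from sinergym (whose band varies by season).
W = 0.5
LAMBDA_E = 1e-4
COMFORT = (20.0, 23.5)

def implied_reward(power, temperatures):
    reward_energy = -LAMBDA_E * power
    violation = sum(max(COMFORT[0] - t, t - COMFORT[1], 0.0) for t in temperatures)
    return W * reward_energy + (1 - W) * -violation

# Matches "Input 2" above.
assert abs(implied_reward(688.0477550424935, [26.7881162590194])
           - (-1.6784605172618248)) < 1e-9
```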
from main import main from src.common import fill_with class Arguments: pass # latex info result_structure = '|c|c|c|c|c|c|' test_structure = '|c|c|c|c|c|c|c|' table_body = ''' \\begin{center} \t\\begin{tabular}{%s} \t\t\\hline \\rowcolor{brown!50} %s \t\\end{tabular} \\end{center}\n ''' # Util data for build the result tables bots = ['reactive', 'proactive'] test_data_order = ['DIRTY-MEAN', 'CLEAN', 'TLE', 'FIRED'] result_headers = '\t\tTest Id & Tipo de robot & Suciedad media & Ambiente limpio & Tiempo terminado & Despedido \\\\ ' # Util data for build the test table prop = ['rows', 'columns', 'babies', 'time', 'toys', 'dirty'] test_headers = "\t\tTest Id & Filas & Columnas & Beb\\'es & Tiempo(t) & Obstaculos & Suciedad \\\\ \hline" test = [ # n, m, b, t, o, d [10, 10, 6, 2, 20, 40], [10, 10, 6, 3, 20, 40], [10, 10, 6, 4, 20, 40], [10, 10, 6, 5, 20, 40], [10, 10, 6, 10, 20, 40], [10, 10, 6, 15, 20, 40], [ 7, 8, 7, 5, 10, 10], [ 7, 8, 7, 5, 14, 30], [20, 20, 16, 15, 0, 45], [20, 20, 16, 15, 30, 0], [20, 20, 5, 15, 0, 0], [ 7, 8, 6, 2, 20, 40], ] def build_args(): args = Arguments() args.env = 'house' args.level = 'notset' args.log_file = '' args.cicles = 100 args.repetitions = 100 args.verbose = False return args def build_test_row(data, id): test_name = '\\multirow{%d}{*}{t%d}' % (len(data), id) empty = ' ' * len(test_name) row = ' & %s' * 5 callback = lambda: empty test_gen = fill_with([test_name], callback) table = [] for (name, d), head in zip(data, test_gen): current_row = row % (name, *[str(d[key]) for key in test_data_order]) table.append(f'\t\t{head}{current_row} \\\\ ') return '\\cline{2-6}\n'.join(table) + '\\hline ' def build_test_table(data): table = [test_headers] for i, t in enumerate(data): row = ' & '.join([str(v) for v in [f't{i}', *t]]) table.append(f'\t\t{row} \\\\ \\hline') return '\n'.join(table) def build_result_table(data): args = build_args() table = [result_headers] for i, t in enumerate(data): for attr in zip(prop, t): setattr(args, *attr) data = [] for bot in bots: args.robot = bot data.append((bot, main(args))) table.append(build_test_row(data, i)) return '\\hline\n'.join(table) if __name__ == "__main__": print(table_body % (test_structure, build_test_table(test))) print(table_body % (result_structure, build_result_table(test)))
29.766667
120
0.545726
[ "MIT" ]
stdevRulo/babysitter_agent
test.py
2,679
Python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py import paddle from paddle import ParamAttr import paddle.nn as nn import paddle.nn.functional as F from paddle.nn.initializer import Normal, Constant from ppdet.core.workspace import register from ppdet.modeling import ops from ppdet.modeling import bbox_utils from ppdet.modeling.proposal_generator.target_layer import RBoxAssigner import numpy as np class S2ANetAnchorGenerator(nn.Layer): """ AnchorGenerator by paddle """ def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None): super(S2ANetAnchorGenerator, self).__init__() self.base_size = base_size self.scales = paddle.to_tensor(scales) self.ratios = paddle.to_tensor(ratios) self.scale_major = scale_major self.ctr = ctr self.base_anchors = self.gen_base_anchors() @property def num_base_anchors(self): return self.base_anchors.shape[0] def gen_base_anchors(self): w = self.base_size h = self.base_size if self.ctr is None: x_ctr = 0.5 * (w - 1) y_ctr = 0.5 * (h - 1) else: x_ctr, y_ctr = self.ctr h_ratios = paddle.sqrt(self.ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:] * self.scales[:]).reshape([-1]) hs = (h * h_ratios[:] * self.scales[:]).reshape([-1]) else: ws = (w * self.scales[:] * w_ratios[:]).reshape([-1]) hs = (h * self.scales[:] * h_ratios[:]).reshape([-1]) base_anchors = paddle.stack( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1) ], axis=-1) base_anchors = paddle.round(base_anchors) return base_anchors def _meshgrid(self, x, y, row_major=True): yy, xx = paddle.meshgrid(y, x) yy = yy.reshape([-1]) xx = xx.reshape([-1]) if row_major: return xx, yy else: return yy, xx def forward(self, featmap_size, stride=16): # featmap_size*stride project it to original area feat_h = featmap_size[0] feat_w = featmap_size[1] shift_x = paddle.arange(0, feat_w, 1, 'int32') * stride shift_y = paddle.arange(0, feat_h, 1, 'int32') * stride shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = paddle.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1) all_anchors = self.base_anchors[:, :] + shifts[:, :] all_anchors = all_anchors.reshape([feat_h * feat_w, 4]) return all_anchors def valid_flags(self, featmap_size, valid_size): feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = paddle.zeros([feat_w], dtype='int32') valid_y = paddle.zeros([feat_h], dtype='int32') valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = paddle.reshape(valid, [-1, 1]) valid = paddle.expand(valid, [-1, self.num_base_anchors]).reshape([-1]) return valid class AlignConv(nn.Layer): def __init__(self, in_channels, out_channels, kernel_size=3, groups=1): super(AlignConv, self).__init__() self.kernel_size = kernel_size 
self.align_conv = paddle.vision.ops.DeformConv2D( in_channels, out_channels, kernel_size=self.kernel_size, padding=(self.kernel_size - 1) // 2, groups=groups, weight_attr=ParamAttr(initializer=Normal(0, 0.01)), bias_attr=None) @paddle.no_grad() def get_offset(self, anchors, featmap_size, stride): """ Args: anchors: [M,5] xc,yc,w,h,angle featmap_size: (feat_h, feat_w) stride: 8 Returns: """ anchors = paddle.reshape(anchors, [-1, 5]) # (NA,5) dtype = anchors.dtype feat_h = featmap_size[0] feat_w = featmap_size[1] pad = (self.kernel_size - 1) // 2 idx = paddle.arange(-pad, pad + 1, dtype=dtype) yy, xx = paddle.meshgrid(idx, idx) xx = paddle.reshape(xx, [-1]) yy = paddle.reshape(yy, [-1]) # get sampling locations of default conv xc = paddle.arange(0, feat_w, dtype=dtype) yc = paddle.arange(0, feat_h, dtype=dtype) yc, xc = paddle.meshgrid(yc, xc) xc = paddle.reshape(xc, [-1, 1]) yc = paddle.reshape(yc, [-1, 1]) x_conv = xc + xx y_conv = yc + yy # get sampling locations of anchors # x_ctr, y_ctr, w, h, a = np.unbind(anchors, dim=1) x_ctr = anchors[:, 0] y_ctr = anchors[:, 1] w = anchors[:, 2] h = anchors[:, 3] a = anchors[:, 4] x_ctr = paddle.reshape(x_ctr, [-1, 1]) y_ctr = paddle.reshape(y_ctr, [-1, 1]) w = paddle.reshape(w, [-1, 1]) h = paddle.reshape(h, [-1, 1]) a = paddle.reshape(a, [-1, 1]) x_ctr = x_ctr / stride y_ctr = y_ctr / stride w_s = w / stride h_s = h / stride cos, sin = paddle.cos(a), paddle.sin(a) dw, dh = w_s / self.kernel_size, h_s / self.kernel_size x, y = dw * xx, dh * yy xr = cos * x - sin * y yr = sin * x + cos * y x_anchor, y_anchor = xr + x_ctr, yr + y_ctr # get offset filed offset_x = x_anchor - x_conv offset_y = y_anchor - y_conv offset = paddle.stack([offset_y, offset_x], axis=-1) offset = paddle.reshape( offset, [feat_h * feat_w, self.kernel_size * self.kernel_size * 2]) offset = paddle.transpose(offset, [1, 0]) offset = paddle.reshape( offset, [1, self.kernel_size * self.kernel_size * 2, feat_h, feat_w]) return offset def forward(self, x, refine_anchors, featmap_size, stride): offset = self.get_offset(refine_anchors, featmap_size, stride) x = F.relu(self.align_conv(x, offset)) return x @register class S2ANetHead(nn.Layer): """ S2Anet head Args: stacked_convs (int): number of stacked_convs feat_in (int): input channels of feat feat_out (int): output channels of feat num_classes (int): num_classes anchor_strides (list): stride of anchors anchor_scales (list): scale of anchors anchor_ratios (list): ratios of anchors target_means (list): target_means target_stds (list): target_stds align_conv_type (str): align_conv_type ['Conv', 'AlignConv'] align_conv_size (int): kernel size of align_conv use_sigmoid_cls (bool): use sigmoid_cls or not reg_loss_weight (list): loss weight for regression """ __shared__ = ['num_classes'] __inject__ = ['anchor_assign'] def __init__(self, stacked_convs=2, feat_in=256, feat_out=256, num_classes=15, anchor_strides=[8, 16, 32, 64, 128], anchor_scales=[4], anchor_ratios=[1.0], target_means=0.0, target_stds=1.0, align_conv_type='AlignConv', align_conv_size=3, use_sigmoid_cls=True, anchor_assign=RBoxAssigner().__dict__, reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1], cls_loss_weight=[1.1, 1.05], reg_loss_type='l1'): super(S2ANetHead, self).__init__() self.stacked_convs = stacked_convs self.feat_in = feat_in self.feat_out = feat_out self.anchor_list = None self.anchor_scales = anchor_scales self.anchor_ratios = anchor_ratios self.anchor_strides = anchor_strides self.anchor_strides = paddle.to_tensor(anchor_strides) self.anchor_base_sizes = 
list(anchor_strides) self.means = paddle.ones(shape=[5]) * target_means self.stds = paddle.ones(shape=[5]) * target_stds assert align_conv_type in ['AlignConv', 'Conv', 'DCN'] self.align_conv_type = align_conv_type self.align_conv_size = align_conv_size self.use_sigmoid_cls = use_sigmoid_cls self.cls_out_channels = num_classes if self.use_sigmoid_cls else 1 self.sampling = False self.anchor_assign = anchor_assign self.reg_loss_weight = reg_loss_weight self.cls_loss_weight = cls_loss_weight self.alpha = 1.0 self.beta = 1.0 self.reg_loss_type = reg_loss_type self.s2anet_head_out = None # anchor self.anchor_generators = [] for anchor_base in self.anchor_base_sizes: self.anchor_generators.append( S2ANetAnchorGenerator(anchor_base, anchor_scales, anchor_ratios)) self.anchor_generators = nn.LayerList(self.anchor_generators) self.fam_cls_convs = nn.Sequential() self.fam_reg_convs = nn.Sequential() for i in range(self.stacked_convs): chan_in = self.feat_in if i == 0 else self.feat_out self.fam_cls_convs.add_sublayer( 'fam_cls_conv_{}'.format(i), nn.Conv2D( in_channels=chan_in, out_channels=self.feat_out, kernel_size=3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.fam_cls_convs.add_sublayer('fam_cls_conv_{}_act'.format(i), nn.ReLU()) self.fam_reg_convs.add_sublayer( 'fam_reg_conv_{}'.format(i), nn.Conv2D( in_channels=chan_in, out_channels=self.feat_out, kernel_size=3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.fam_reg_convs.add_sublayer('fam_reg_conv_{}_act'.format(i), nn.ReLU()) self.fam_reg = nn.Conv2D( self.feat_out, 5, 1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) prior_prob = 0.01 bias_init = float(-np.log((1 - prior_prob) / prior_prob)) self.fam_cls = nn.Conv2D( self.feat_out, self.cls_out_channels, 1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(bias_init))) if self.align_conv_type == "AlignConv": self.align_conv = AlignConv(self.feat_out, self.feat_out, self.align_conv_size) elif self.align_conv_type == "Conv": self.align_conv = nn.Conv2D( self.feat_out, self.feat_out, self.align_conv_size, padding=(self.align_conv_size - 1) // 2, bias_attr=ParamAttr(initializer=Constant(0))) elif self.align_conv_type == "DCN": self.align_conv_offset = nn.Conv2D( self.feat_out, 2 * self.align_conv_size**2, 1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) self.align_conv = paddle.vision.ops.DeformConv2D( self.feat_out, self.feat_out, self.align_conv_size, padding=(self.align_conv_size - 1) // 2, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=False) self.or_conv = nn.Conv2D( self.feat_out, self.feat_out, kernel_size=3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) # ODM self.odm_cls_convs = nn.Sequential() self.odm_reg_convs = nn.Sequential() for i in range(self.stacked_convs): ch_in = self.feat_out # ch_in = int(self.feat_out / 8) if i == 0 else self.feat_out self.odm_cls_convs.add_sublayer( 'odm_cls_conv_{}'.format(i), nn.Conv2D( in_channels=ch_in, out_channels=self.feat_out, kernel_size=3, stride=1, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.odm_cls_convs.add_sublayer('odm_cls_conv_{}_act'.format(i), nn.ReLU()) self.odm_reg_convs.add_sublayer( 
'odm_reg_conv_{}'.format(i), nn.Conv2D( in_channels=self.feat_out, out_channels=self.feat_out, kernel_size=3, stride=1, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.odm_reg_convs.add_sublayer('odm_reg_conv_{}_act'.format(i), nn.ReLU()) self.odm_cls = nn.Conv2D( self.feat_out, self.cls_out_channels, 3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(bias_init))) self.odm_reg = nn.Conv2D( self.feat_out, 5, 3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) self.featmap_sizes = [] self.base_anchors_list = [] self.refine_anchor_list = [] def forward(self, feats): fam_reg_branch_list = [] fam_cls_branch_list = [] odm_reg_branch_list = [] odm_cls_branch_list = [] self.featmap_sizes_list = [] self.base_anchors_list = [] self.refine_anchor_list = [] for feat_idx in range(len(feats)): feat = feats[feat_idx] fam_cls_feat = self.fam_cls_convs(feat) fam_cls = self.fam_cls(fam_cls_feat) # [N, CLS, H, W] --> [N, H, W, CLS] fam_cls = fam_cls.transpose([0, 2, 3, 1]) fam_cls_reshape = paddle.reshape( fam_cls, [fam_cls.shape[0], -1, self.cls_out_channels]) fam_cls_branch_list.append(fam_cls_reshape) fam_reg_feat = self.fam_reg_convs(feat) fam_reg = self.fam_reg(fam_reg_feat) # [N, 5, H, W] --> [N, H, W, 5] fam_reg = fam_reg.transpose([0, 2, 3, 1]) fam_reg_reshape = paddle.reshape(fam_reg, [fam_reg.shape[0], -1, 5]) fam_reg_branch_list.append(fam_reg_reshape) # prepare anchor featmap_size = (paddle.shape(feat)[2], paddle.shape(feat)[3]) self.featmap_sizes_list.append(featmap_size) init_anchors = self.anchor_generators[feat_idx]( featmap_size, self.anchor_strides[feat_idx]) init_anchors = paddle.to_tensor(init_anchors, dtype='float32') NA = featmap_size[0] * featmap_size[1] init_anchors = paddle.reshape(init_anchors, [NA, 4]) init_anchors = self.rect2rbox(init_anchors) self.base_anchors_list.append(init_anchors) if self.training: refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors) else: refine_anchor = self.bbox_decode(fam_reg, init_anchors) self.refine_anchor_list.append(refine_anchor) if self.align_conv_type == 'AlignConv': align_feat = self.align_conv(feat, refine_anchor.clone(), featmap_size, self.anchor_strides[feat_idx]) elif self.align_conv_type == 'DCN': align_offset = self.align_conv_offset(feat) align_feat = self.align_conv(feat, align_offset) elif self.align_conv_type == 'Conv': align_feat = self.align_conv(feat) or_feat = self.or_conv(align_feat) odm_reg_feat = or_feat odm_cls_feat = or_feat odm_reg_feat = self.odm_reg_convs(odm_reg_feat) odm_cls_feat = self.odm_cls_convs(odm_cls_feat) odm_cls_score = self.odm_cls(odm_cls_feat) # [N, CLS, H, W] --> [N, H, W, CLS] odm_cls_score = odm_cls_score.transpose([0, 2, 3, 1]) odm_cls_score_shape = odm_cls_score.shape odm_cls_score_reshape = paddle.reshape(odm_cls_score, [ odm_cls_score_shape[0], odm_cls_score_shape[1] * odm_cls_score_shape[2], self.cls_out_channels ]) odm_cls_branch_list.append(odm_cls_score_reshape) odm_bbox_pred = self.odm_reg(odm_reg_feat) # [N, 5, H, W] --> [N, H, W, 5] odm_bbox_pred = odm_bbox_pred.transpose([0, 2, 3, 1]) odm_bbox_pred_reshape = paddle.reshape(odm_bbox_pred, [-1, 5]) odm_bbox_pred_reshape = paddle.unsqueeze( odm_bbox_pred_reshape, axis=0) odm_reg_branch_list.append(odm_bbox_pred_reshape) self.s2anet_head_out = (fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list) return 
self.s2anet_head_out def get_prediction(self, nms_pre=2000): refine_anchors = self.refine_anchor_list fam_cls_branch_list = self.s2anet_head_out[0] fam_reg_branch_list = self.s2anet_head_out[1] odm_cls_branch_list = self.s2anet_head_out[2] odm_reg_branch_list = self.s2anet_head_out[3] pred_scores, pred_bboxes = self.get_bboxes( odm_cls_branch_list, odm_reg_branch_list, refine_anchors, nms_pre, self.cls_out_channels, self.use_sigmoid_cls) return pred_scores, pred_bboxes def smooth_l1_loss(self, pred, label, delta=1.0 / 9.0): """ Args: pred: pred score label: label delta: delta Returns: loss """ assert pred.shape == label.shape and label.numel() > 0 assert delta > 0 diff = paddle.abs(pred - label) loss = paddle.where(diff < delta, 0.5 * diff * diff / delta, diff - 0.5 * delta) return loss def get_fam_loss(self, fam_target, s2anet_head_out, reg_loss_type='gwd'): (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes, pos_inds, neg_inds) = fam_target fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out fam_cls_losses = [] fam_bbox_losses = [] st_idx = 0 num_total_samples = len(pos_inds) + len( neg_inds) if self.sampling else len(pos_inds) num_total_samples = max(1, num_total_samples) for idx, feat_size in enumerate(self.featmap_sizes_list): feat_anchor_num = feat_size[0] * feat_size[1] # step1: get data feat_labels = labels[st_idx:st_idx + feat_anchor_num] feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num] feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :] feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :] # step2: calc cls loss feat_labels = feat_labels.reshape(-1) feat_label_weights = feat_label_weights.reshape(-1) fam_cls_score = fam_cls_branch_list[idx] fam_cls_score = paddle.squeeze(fam_cls_score, axis=0) fam_cls_score1 = fam_cls_score feat_labels = paddle.to_tensor(feat_labels) feat_labels_one_hot = paddle.nn.functional.one_hot( feat_labels, self.cls_out_channels + 1) feat_labels_one_hot = feat_labels_one_hot[:, 1:] feat_labels_one_hot.stop_gradient = True num_total_samples = paddle.to_tensor( num_total_samples, dtype='float32', stop_gradient=True) fam_cls = F.sigmoid_focal_loss( fam_cls_score1, feat_labels_one_hot, normalizer=num_total_samples, reduction='none') feat_label_weights = feat_label_weights.reshape( feat_label_weights.shape[0], 1) feat_label_weights = np.repeat( feat_label_weights, self.cls_out_channels, axis=1) feat_label_weights = paddle.to_tensor( feat_label_weights, stop_gradient=True) fam_cls = fam_cls * feat_label_weights fam_cls_total = paddle.sum(fam_cls) fam_cls_losses.append(fam_cls_total) # step3: regression loss feat_bbox_targets = paddle.to_tensor( feat_bbox_targets, dtype='float32', stop_gradient=True) feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5]) fam_bbox_pred = fam_reg_branch_list[idx] fam_bbox_pred = paddle.squeeze(fam_bbox_pred, axis=0) fam_bbox_pred = paddle.reshape(fam_bbox_pred, [-1, 5]) fam_bbox = self.smooth_l1_loss(fam_bbox_pred, feat_bbox_targets) loss_weight = paddle.to_tensor( self.reg_loss_weight, dtype='float32', stop_gradient=True) fam_bbox = paddle.multiply(fam_bbox, loss_weight) feat_bbox_weights = paddle.to_tensor( feat_bbox_weights, stop_gradient=True) if reg_loss_type == 'l1': fam_bbox = fam_bbox * feat_bbox_weights fam_bbox_total = paddle.sum(fam_bbox) / num_total_samples elif reg_loss_type == 'iou' or reg_loss_type == 'gwd': fam_bbox = paddle.sum(fam_bbox, axis=-1) feat_bbox_weights = 
paddle.sum(feat_bbox_weights, axis=-1) try: from rbox_iou_ops import rbox_iou except Exception as e: print("import custom_ops error, try install rbox_iou_ops " \ "following ppdet/ext_op/README.md", e) sys.stdout.flush() sys.exit(-1) # calc iou fam_bbox_decode = self.delta2rbox(self.base_anchors_list[idx], fam_bbox_pred) bbox_gt_bboxes = paddle.to_tensor( bbox_gt_bboxes, dtype=fam_bbox_decode.dtype, place=fam_bbox_decode.place) bbox_gt_bboxes.stop_gradient = True iou = rbox_iou(fam_bbox_decode, bbox_gt_bboxes) iou = paddle.diag(iou) if reg_loss_type == 'gwd': bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx + feat_anchor_num, :] fam_bbox_total = self.gwd_loss(fam_bbox_decode, bbox_gt_bboxes_level) fam_bbox_total = fam_bbox_total * feat_bbox_weights fam_bbox_total = paddle.sum( fam_bbox_total) / num_total_samples fam_bbox_losses.append(fam_bbox_total) st_idx += feat_anchor_num fam_cls_loss = paddle.add_n(fam_cls_losses) fam_cls_loss_weight = paddle.to_tensor( self.cls_loss_weight[0], dtype='float32', stop_gradient=True) fam_cls_loss = fam_cls_loss * fam_cls_loss_weight fam_reg_loss = paddle.add_n(fam_bbox_losses) return fam_cls_loss, fam_reg_loss def get_odm_loss(self, odm_target, s2anet_head_out, reg_loss_type='gwd'): (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes, pos_inds, neg_inds) = odm_target fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out odm_cls_losses = [] odm_bbox_losses = [] st_idx = 0 num_total_samples = len(pos_inds) + len( neg_inds) if self.sampling else len(pos_inds) num_total_samples = max(1, num_total_samples) for idx, feat_size in enumerate(self.featmap_sizes_list): feat_anchor_num = feat_size[0] * feat_size[1] # step1: get data feat_labels = labels[st_idx:st_idx + feat_anchor_num] feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num] feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :] feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :] # step2: calc cls loss feat_labels = feat_labels.reshape(-1) feat_label_weights = feat_label_weights.reshape(-1) odm_cls_score = odm_cls_branch_list[idx] odm_cls_score = paddle.squeeze(odm_cls_score, axis=0) odm_cls_score1 = odm_cls_score feat_labels = paddle.to_tensor(feat_labels) feat_labels_one_hot = paddle.nn.functional.one_hot( feat_labels, self.cls_out_channels + 1) feat_labels_one_hot = feat_labels_one_hot[:, 1:] feat_labels_one_hot.stop_gradient = True num_total_samples = paddle.to_tensor( num_total_samples, dtype='float32', stop_gradient=True) odm_cls = F.sigmoid_focal_loss( odm_cls_score1, feat_labels_one_hot, normalizer=num_total_samples, reduction='none') feat_label_weights = feat_label_weights.reshape( feat_label_weights.shape[0], 1) feat_label_weights = np.repeat( feat_label_weights, self.cls_out_channels, axis=1) feat_label_weights = paddle.to_tensor(feat_label_weights) feat_label_weights.stop_gradient = True odm_cls = odm_cls * feat_label_weights odm_cls_total = paddle.sum(odm_cls) odm_cls_losses.append(odm_cls_total) # # step3: regression loss feat_bbox_targets = paddle.to_tensor( feat_bbox_targets, dtype='float32') feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5]) feat_bbox_targets.stop_gradient = True odm_bbox_pred = odm_reg_branch_list[idx] odm_bbox_pred = paddle.squeeze(odm_bbox_pred, axis=0) odm_bbox_pred = paddle.reshape(odm_bbox_pred, [-1, 5]) odm_bbox = self.smooth_l1_loss(odm_bbox_pred, feat_bbox_targets) loss_weight = paddle.to_tensor( self.reg_loss_weight, dtype='float32', 
stop_gradient=True) odm_bbox = paddle.multiply(odm_bbox, loss_weight) feat_bbox_weights = paddle.to_tensor( feat_bbox_weights, stop_gradient=True) if reg_loss_type == 'l1': odm_bbox = odm_bbox * feat_bbox_weights odm_bbox_total = paddle.sum(odm_bbox) / num_total_samples elif reg_loss_type == 'iou' or reg_loss_type == 'gwd': odm_bbox = paddle.sum(odm_bbox, axis=-1) feat_bbox_weights = paddle.sum(feat_bbox_weights, axis=-1) try: from rbox_iou_ops import rbox_iou except Exception as e: print("import custom_ops error, try install rbox_iou_ops " \ "following ppdet/ext_op/README.md", e) sys.stdout.flush() sys.exit(-1) # calc iou odm_bbox_decode = self.delta2rbox(self.refine_anchor_list[idx], odm_bbox_pred) bbox_gt_bboxes = paddle.to_tensor( bbox_gt_bboxes, dtype=odm_bbox_decode.dtype, place=odm_bbox_decode.place) bbox_gt_bboxes.stop_gradient = True iou = rbox_iou(odm_bbox_decode, bbox_gt_bboxes) iou = paddle.diag(iou) if reg_loss_type == 'gwd': bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx + feat_anchor_num, :] odm_bbox_total = self.gwd_loss(odm_bbox_decode, bbox_gt_bboxes_level) odm_bbox_total = odm_bbox_total * feat_bbox_weights odm_bbox_total = paddle.sum( odm_bbox_total) / num_total_samples odm_bbox_losses.append(odm_bbox_total) st_idx += feat_anchor_num odm_cls_loss = paddle.add_n(odm_cls_losses) odm_cls_loss_weight = paddle.to_tensor( self.cls_loss_weight[1], dtype='float32', stop_gradient=True) odm_cls_loss = odm_cls_loss * odm_cls_loss_weight odm_reg_loss = paddle.add_n(odm_bbox_losses) return odm_cls_loss, odm_reg_loss def get_loss(self, inputs): # inputs: im_id image im_shape scale_factor gt_bbox gt_class is_crowd # compute loss fam_cls_loss_lst = [] fam_reg_loss_lst = [] odm_cls_loss_lst = [] odm_reg_loss_lst = [] im_shape = inputs['im_shape'] for im_id in range(im_shape.shape[0]): np_im_shape = inputs['im_shape'][im_id].numpy() np_scale_factor = inputs['scale_factor'][im_id].numpy() # data_format: (xc, yc, w, h, theta) gt_bboxes = inputs['gt_rbox'][im_id].numpy() gt_labels = inputs['gt_class'][im_id].numpy() is_crowd = inputs['is_crowd'][im_id].numpy() gt_labels = gt_labels + 1 # featmap_sizes anchors_list_all = np.concatenate(self.base_anchors_list) # get im_feat fam_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[0]] fam_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[1]] odm_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[2]] odm_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[3]] im_s2anet_head_out = (fam_cls_feats_list, fam_reg_feats_list, odm_cls_feats_list, odm_reg_feats_list) # FAM im_fam_target = self.anchor_assign(anchors_list_all, gt_bboxes, gt_labels, is_crowd) if im_fam_target is not None: im_fam_cls_loss, im_fam_reg_loss = self.get_fam_loss( im_fam_target, im_s2anet_head_out, self.reg_loss_type) fam_cls_loss_lst.append(im_fam_cls_loss) fam_reg_loss_lst.append(im_fam_reg_loss) # ODM np_refine_anchors_list = paddle.concat( self.refine_anchor_list).numpy() np_refine_anchors_list = np.concatenate(np_refine_anchors_list) np_refine_anchors_list = np_refine_anchors_list.reshape(-1, 5) im_odm_target = self.anchor_assign(np_refine_anchors_list, gt_bboxes, gt_labels, is_crowd) if im_odm_target is not None: im_odm_cls_loss, im_odm_reg_loss = self.get_odm_loss( im_odm_target, im_s2anet_head_out, self.reg_loss_type) odm_cls_loss_lst.append(im_odm_cls_loss) odm_reg_loss_lst.append(im_odm_reg_loss) fam_cls_loss = paddle.add_n(fam_cls_loss_lst) fam_reg_loss = paddle.add_n(fam_reg_loss_lst) odm_cls_loss = paddle.add_n(odm_cls_loss_lst) 
odm_reg_loss = paddle.add_n(odm_reg_loss_lst) return { 'fam_cls_loss': fam_cls_loss, 'fam_reg_loss': fam_reg_loss, 'odm_cls_loss': odm_cls_loss, 'odm_reg_loss': odm_reg_loss } def get_bboxes(self, cls_score_list, bbox_pred_list, mlvl_anchors, nms_pre, cls_out_channels, use_sigmoid_cls): assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] idx = 0 for cls_score, bbox_pred, anchors in zip(cls_score_list, bbox_pred_list, mlvl_anchors): cls_score = paddle.reshape(cls_score, [-1, cls_out_channels]) if use_sigmoid_cls: scores = F.sigmoid(cls_score) else: scores = F.softmax(cls_score, axis=-1) # bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 5) bbox_pred = paddle.transpose(bbox_pred, [1, 2, 0]) bbox_pred = paddle.reshape(bbox_pred, [-1, 5]) anchors = paddle.reshape(anchors, [-1, 5]) if scores.shape[0] > nms_pre: # Get maximum scores for foreground classes. if use_sigmoid_cls: max_scores = paddle.max(scores, axis=1) else: max_scores = paddle.max(scores[:, 1:], axis=1) topk_val, topk_inds = paddle.topk(max_scores, nms_pre) anchors = paddle.gather(anchors, topk_inds) bbox_pred = paddle.gather(bbox_pred, topk_inds) scores = paddle.gather(scores, topk_inds) bbox_delta = paddle.reshape(bbox_pred, [-1, 5]) bboxes = self.delta2rbox(anchors, bbox_delta) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) idx += 1 mlvl_bboxes = paddle.concat(mlvl_bboxes, axis=0) mlvl_scores = paddle.concat(mlvl_scores) return mlvl_scores, mlvl_bboxes def rect2rbox(self, bboxes): """ :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax) :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle) """ bboxes = paddle.reshape(bboxes, [-1, 4]) num_boxes = paddle.shape(bboxes)[0] x_ctr = (bboxes[:, 2] + bboxes[:, 0]) / 2.0 y_ctr = (bboxes[:, 3] + bboxes[:, 1]) / 2.0 edges1 = paddle.abs(bboxes[:, 2] - bboxes[:, 0]) edges2 = paddle.abs(bboxes[:, 3] - bboxes[:, 1]) rbox_w = paddle.maximum(edges1, edges2) rbox_h = paddle.minimum(edges1, edges2) # set angle inds = edges1 < edges2 inds = paddle.cast(inds, 'int32') rboxes_angle = inds * np.pi / 2.0 rboxes = paddle.stack( (x_ctr, y_ctr, rbox_w, rbox_h, rboxes_angle), axis=1) return rboxes # deltas to rbox def delta2rbox(self, rrois, deltas, wh_ratio_clip=1e-6): """ :param rrois: (cx, cy, w, h, theta) :param deltas: (dx, dy, dw, dh, dtheta) :param means: means of anchor :param stds: stds of anchor :param wh_ratio_clip: clip threshold of wh_ratio :return: """ deltas = paddle.reshape(deltas, [-1, 5]) rrois = paddle.reshape(rrois, [-1, 5]) # fix dy2st bug denorm_deltas = deltas * self.stds + self.means denorm_deltas = paddle.add( paddle.multiply(deltas, self.stds), self.means) dx = denorm_deltas[:, 0] dy = denorm_deltas[:, 1] dw = denorm_deltas[:, 2] dh = denorm_deltas[:, 3] dangle = denorm_deltas[:, 4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = paddle.clip(dw, min=-max_ratio, max=max_ratio) dh = paddle.clip(dh, min=-max_ratio, max=max_ratio) rroi_x = rrois[:, 0] rroi_y = rrois[:, 1] rroi_w = rrois[:, 2] rroi_h = rrois[:, 3] rroi_angle = rrois[:, 4] gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin( rroi_angle) + rroi_x gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos( rroi_angle) + rroi_y gw = rroi_w * dw.exp() gh = rroi_h * dh.exp() ga = np.pi * dangle + rroi_angle ga = (ga + np.pi / 4) % np.pi - np.pi / 4 ga = paddle.to_tensor(ga) gw = paddle.to_tensor(gw, dtype='float32') gh = paddle.to_tensor(gh, dtype='float32') bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1) return bboxes def 
bbox_decode(self, bbox_preds, anchors): """decode bbox from deltas Args: bbox_preds: [N,H,W,5] anchors: [H*W,5] return: bboxes: [N,H,W,5] """ num_imgs, H, W, _ = bbox_preds.shape bbox_delta = paddle.reshape(bbox_preds, [-1, 5]) bboxes = self.delta2rbox(anchors, bbox_delta) return bboxes def trace(self, A): tr = paddle.diagonal(A, axis1=-2, axis2=-1) tr = paddle.sum(tr, axis=-1) return tr def sqrt_newton_schulz_autograd(self, A, numIters): A_shape = A.shape batchSize = A_shape[0] dim = A_shape[1] normA = A * A normA = paddle.sum(normA, axis=1) normA = paddle.sum(normA, axis=1) normA = paddle.sqrt(normA) normA1 = normA.reshape([batchSize, 1, 1]) Y = paddle.divide(A, paddle.expand_as(normA1, A)) I = paddle.eye(dim, dim).reshape([1, dim, dim]) l0 = [] for i in range(batchSize): l0.append(I) I = paddle.concat(l0, axis=0) I.stop_gradient = False Z = paddle.eye(dim, dim).reshape([1, dim, dim]) l1 = [] for i in range(batchSize): l1.append(Z) Z = paddle.concat(l1, axis=0) Z.stop_gradient = False for i in range(numIters): T = 0.5 * (3.0 * I - Z.bmm(Y)) Y = Y.bmm(T) Z = T.bmm(Z) sA = Y * paddle.sqrt(normA1).reshape([batchSize, 1, 1]) sA = paddle.expand_as(sA, A) return sA def wasserstein_distance_sigma(sigma1, sigma2): wasserstein_distance_item2 = paddle.matmul( sigma1, sigma1) + paddle.matmul( sigma2, sigma2) - 2 * self.sqrt_newton_schulz_autograd( paddle.matmul( paddle.matmul(sigma1, paddle.matmul(sigma2, sigma2)), sigma1), 10) wasserstein_distance_item2 = self.trace(wasserstein_distance_item2) return wasserstein_distance_item2 def xywhr2xyrs(self, xywhr): xywhr = paddle.reshape(xywhr, [-1, 5]) xy = xywhr[:, :2] wh = paddle.clip(xywhr[:, 2:4], min=1e-7, max=1e7) r = xywhr[:, 4] cos_r = paddle.cos(r) sin_r = paddle.sin(r) R = paddle.stack( (cos_r, -sin_r, sin_r, cos_r), axis=-1).reshape([-1, 2, 2]) S = 0.5 * paddle.nn.functional.diag_embed(wh) return xy, R, S def gwd_loss(self, pred, target, fun='log', tau=1.0, alpha=1.0, normalize=False): xy_p, R_p, S_p = self.xywhr2xyrs(pred) xy_t, R_t, S_t = self.xywhr2xyrs(target) xy_distance = (xy_p - xy_t).square().sum(axis=-1) Sigma_p = R_p.matmul(S_p.square()).matmul(R_p.transpose([0, 2, 1])) Sigma_t = R_t.matmul(S_t.square()).matmul(R_t.transpose([0, 2, 1])) whr_distance = paddle.diagonal( S_p, axis1=-2, axis2=-1).square().sum(axis=-1) whr_distance = whr_distance + paddle.diagonal( S_t, axis1=-2, axis2=-1).square().sum(axis=-1) _t = Sigma_p.matmul(Sigma_t) _t_tr = paddle.diagonal(_t, axis1=-2, axis2=-1).sum(axis=-1) _t_det_sqrt = paddle.diagonal(S_p, axis1=-2, axis2=-1).prod(axis=-1) _t_det_sqrt = _t_det_sqrt * paddle.diagonal( S_t, axis1=-2, axis2=-1).prod(axis=-1) whr_distance = whr_distance + (-2) * ( (_t_tr + 2 * _t_det_sqrt).clip(0).sqrt()) distance = (xy_distance + alpha * alpha * whr_distance).clip(0) if normalize: wh_p = pred[..., 2:4].clip(min=1e-7, max=1e7) wh_t = target[..., 2:4].clip(min=1e-7, max=1e7) scale = ((wh_p.log() + wh_t.log()).sum(dim=-1) / 4).exp() distance = distance / scale if fun == 'log': distance = paddle.log1p(distance) if tau >= 1.0: return 1 - 1 / (tau + distance) return distance
40.058151
116
0.572975
[ "Apache-2.0" ]
1190202328/PaddleDetection
ppdet/modeling/heads/s2anet_head.py
42,021
Python
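As a quick illustration of the anchor pipeline in the file above, the generator can be exercised on its own. This is a sketch assuming PaddlePaddle is installed; the sizes mirror `S2ANetHead`'s defaults, where `anchor_base_sizes` makes `base_size` equal to the stride.

```python
# Sketch: stand-alone use of S2ANetAnchorGenerator from the file above,
# with S2ANetHead's defaults (anchor_scales=[4], anchor_ratios=[1.0]) and
# base_size equal to the stride, as in anchor_base_sizes.
gen = S2ANetAnchorGenerator(base_size=8, scales=[4], ratios=[1.0])
anchors = gen((16, 16), stride=8)  # nn.Layer.__call__ dispatches to forward()
print(anchors.shape)               # [256, 4]: one (x1, y1, x2, y2) box per cell
```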
from http.server import HTTPServer, BaseHTTPRequestHandler from socketserver import ThreadingMixIn from .redirect import RedirectHandler import threading import ssl __all__ = ['ThreadedServer', 'SecureServer'] class ThreadedServer(ThreadingMixIn, HTTPServer): protocol_version = 'HTTP/1.1' def __init__(self, host: str, port: int, RequestHandlerClass: BaseHTTPRequestHandler, bind_and_activate: bool=True): self._serve_forever_thread = None # type: threading.Thread super().__init__((host, port), RequestHandlerClass, bind_and_activate) def serve_forever(self, poll_interval=0.5): self._serve_forever_thread = threading.Thread( target=super().serve_forever, args=(poll_interval,) ) self._serve_forever_thread.start() class SecureServer(ThreadedServer): def __init__(self, certfile: str, keyfile: str, host: str, port: int, RequestHandlerClass: BaseHTTPRequestHandler, bind_and_activate: bool = True): self._certfile = certfile self._keyfile = keyfile self._redirect = ThreadedServer(host, 80, RedirectHandler, bind_and_activate) super().__init__(host, port, RequestHandlerClass, bind_and_activate) def server_bind(self): super().server_bind() self._redirect.server_bind() self.socket = ssl.wrap_socket(self.socket, server_side=True, certfile=self._certfile, keyfile=self._keyfile, do_handshake_on_connect=False) def get_request(self): sock, addr = super().get_request() sock.do_handshake() return sock, addr def serve_forever(self, poll_interval=0.5): super().serve_forever(poll_interval) self._redirect.serve_forever(poll_interval) def shutdown(self): super().shutdown() self._redirect.shutdown()
34.242424
78
0.574779
[ "MIT" ]
ChrisCalderon/SerpentServer
serpent_server/server.py
2,260
Python
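A hypothetical usage sketch for the servers above; the handler class and port are placeholders. Note that `serve_forever()` returns immediately because serving happens in a background thread, and `SecureServer` additionally binds an HTTP-to-HTTPS redirect server on port 80.

```python
# Hypothetical usage of ThreadedServer; handler and port are placeholders.
from http.server import SimpleHTTPRequestHandler
from serpent_server.server import ThreadedServer

server = ThreadedServer('0.0.0.0', 8080, SimpleHTTPRequestHandler)
server.serve_forever()  # non-blocking: serving runs in a background thread
# ... do other work here ...
server.shutdown()
```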
# Generated by Django 2.1.3 on 2019-01-07 17:48 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0014_auto_20190107_2251'), ] operations = [ migrations.DeleteModel( name='PostPicks', ), migrations.AddField( model_name='post', name='image', field=models.ImageField(default='default.jpg', upload_to='post_pics'), ), ]
21.954545
82
0.581781
[ "MIT" ]
dkowsikpai/librolet
blog/migrations/0015_auto_20190107_2318.py
483
Python
# Copyright (C) 2008-today The SG++ project
# This file is part of the SG++ project. For conditions of distribution and
# use, please see the copyright notice provided with SG++ or at
# sgpp.sparsegrids.org
# This file is part of SGClass, a program package making use of spatially adaptive sparse grids to solve numerical problems
#
# Copyright (C) 2007 Dirk Pflueger ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pyclass. If not, see <http://www.gnu.org/licenses/>.

## @package classifier
# @ingroup bin
# @brief Show some statistics for data files
# Handles either ARFF-files or plain data files. If the ARFF file
# contains a class attribute, it also shows statistics for the class
# distribution.
#
# Help with <tt>--help</tt>.
# @version $CURR$

from optparse import OptionParser
import sys, os, math
from tools import *

parser = OptionParser()
parser.set_usage('%prog [options]\n\t Gives some statistics for datasets, given either in arff or simple file format')
parser.add_option("-i", "--infile", action="append", type="string", dest="infiles",
                  help="Specifies the input files to analyse.")
(options, args) = parser.parse_args()

if options.infiles is None:
    parser.parse_args(['-h'])

# loop over infiles
for filename in options.infiles:
    try:
        # read in files
        print("================= %20s =================" % (filename))
        ftype = isARFFFile(filename)
        if ftype == ARFF:
            dataset = readDataARFF(filename)
        elif ftype == SIMPLE:
            dataset = readDataTrivial(filename)
        else:
            sys.stderr.write("Skipping " + filename + os.linesep)
            continue

        # analyse data
        # header
        dim = len(dataset["data"])
        numpoints = len(dataset["data"][0])
        print("Dim (#attributes): %d" % (dim))
        print(" %-4s %-12s %-12s %-12s %-12s %-12s" % ("Dim", "Min", "Max", "mean", "unbiased V", "samplestddev"))
        # traverse all attributes
        for i in range(dim):
            total_sum = sum(dataset["data"][i])
            mean = total_sum / float(numpoints)
            unbiased_variance = sum(map(lambda x: (x - mean)**2, dataset["data"][i])) / float(numpoints - 1)
            sample_stddev = math.sqrt(unbiased_variance)
            print(" %02d %12f %12f %12f %12f %12f" % (i + 1, min(dataset["data"][i]), max(dataset["data"][i]), mean, unbiased_variance, sample_stddev))
        print("#data points: %d" % (numpoints))

        # statistics for class distribution
        if "classes" in dataset:
            print("Class distribution:")
            class_count = {}
            for c in dataset["classes"]:
                if c in class_count:
                    class_count[c] += 1
                else:
                    class_count[c] = 1
            for c in sorted(class_count.keys()):
                print(" %12f %d" % (c, class_count[c]))
    except Exception as e:
        sys.stderr.write("ERROR: Skipping " + filename + os.linesep)
        print(" ", e)
41.37
128
0.556442
[ "CC0-1.0" ]
valentjn/thesis
lib/pysgpp/extensions/misc/datasetAnalysis.py
4,137
Python
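The per-attribute statistics the script prints are the standard sample mean and unbiased variance (dividing by n-1). A tiny standalone check of those formulas on a hand-made column:

```python
# Check of the statistics the script above computes, on a hand-made column.
import math

col = [1.0, 2.0, 3.0, 4.0]
n = len(col)
mean = sum(col) / n                                        # 2.5
unbiased_variance = sum((x - mean) ** 2 for x in col) / (n - 1)  # 5/3
print(mean, unbiased_variance, math.sqrt(unbiased_variance))
```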
"""Entrypoint for the WSGI app (web API) """ from . import api application = api.create_app()
15.833333
40
0.694737
[ "MIT" ]
lionel-panhaleux/krcg-api
krcg_api/wsgi.py
95
Python
"""Utilties for distributed processing""" import horovod.tensorflow.keras as hvd def rank(): try: return hvd.rank() except ValueError: return 0 def barrier(): try: hvd.allreduce([], name='Barrier') except ValueError: pass
17.0625
41
0.611722
[ "Apache-2.0" ]
bgerofi/hpc_results_v0.7
CSCS/benchmarks/cosmoflow/implementations/cosmoflow-benchmark/utils/distributed.py
273
Python
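A sketch of typical use; the import path is inferred from the file path listed for this record. Both helpers fall back to single-process behaviour when Horovod was never initialized, which is what the `ValueError` handlers are for.

```python
# Sketch of typical use of the helpers above (module path inferred from
# the record's file path; adjust to your layout).
import horovod.tensorflow.keras as hvd
from utils.distributed import rank, barrier

hvd.init()
if rank() == 0:
    print('only the root worker logs this')
barrier()  # every worker waits here before continuing
```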
#!/usr/bin/env python3 import importlib import logging import os import traceback import pybullet_data import rclpy from rclpy import executors from rclpy.executors import MultiThreadedExecutor from rclpy.node import Node from std_srvs.srv import Empty from pybullet_ros.function_exec_manager import FuncExecManager class pyBulletRosWrapper(Node): """ROS wrapper class for pybullet simulator""" def __init__(self): super().__init__('pybullet_ros', automatically_declare_parameters_from_overrides=True) ex = MultiThreadedExecutor() self.executor = ex # import pybullet self.pb = importlib.import_module('pybullet') # get from param server the frequency at which to run the simulation self.loop_rate = self.get_parameter('loop_rate').value self.get_logger().info('Loop rate: {}'.format(self.loop_rate)) # query from param server if gui is needed is_gui_needed = self.get_parameter('pybullet_gui').value # get from param server if user wants to pause simulation at startup self.pause_simulation = self.get_parameter('pause_simulation').value print('\033[34m') # print pybullet stuff in blue physicsClient = self.start_gui(gui=is_gui_needed) # we dont need to store the physics client for now... # setup service to restart simulation self.create_service(Empty, 'reset_simulation', self.handle_reset_simulation) # setup services for pausing/unpausing simulation self.create_service(Empty, 'pause_physics', self.handle_pause_physics) self.create_service(Empty, 'unpause_physics', self.handle_unpause_physics) # get pybullet path in your system and store it internally for future use, e.g. to set floor self.pb.setAdditionalSearchPath(pybullet_data.getDataPath()) # create object of environment class for later use env_plugin = self.get_parameter('environment').value # default : plugins/environment.py plugin_import_prefix = self.get_parameter('plugin_import_prefix').value self.environment = getattr(importlib.import_module(f'{plugin_import_prefix}.{env_plugin}'), 'Environment')(self) # load robot URDF model, set gravity, and ground plane self.robot = self.init_pybullet_robot() self.connected_to_physics_server = None if not self.robot: self.connected_to_physics_server = False return # Error while loading urdf file else: self.connected_to_physics_server = True # get all revolute joint names and pybullet index rev_joint_index_name_dic, prismatic_joint_index_name_dic, fixed_joint_index_name_dic, link_names_to_ids_dic = self.get_properties() # import plugins dynamically self.plugins = [] plugins = self.get_parameter('plugins').value if not plugins: self.get_logger().warn('No plugins found, forgot to set param plugins?') # return to normal shell color print('\033[0m') # load plugins for plugin in plugins: module_, class_ = plugin.split(':') params_ = {'module': module_, 'class': class_} self.get_logger().info('loading plugin: {} class from {}'.format(class_, module_)) # create object of the imported file class obj = getattr(importlib.import_module(module_), class_)(self.pb, self.robot, rev_joints=rev_joint_index_name_dic, prism_joints=prismatic_joint_index_name_dic, fixed_joints=fixed_joint_index_name_dic, link_ids=link_names_to_ids_dic, **params_) # store objects in member variable for future use self.plugins.append(obj) self.executor.add_node(obj) self.get_logger().info('pybullet ROS wrapper initialized') self.timer = self.create_timer(1.0 / self.loop_rate, self.wrapper_callback) self.executor.add_node(self) try: self.executor.spin() #except Exception as e: # self.get_logger().error(traceback.format_exc()) finally: 
            self.executor.shutdown()
            self.destroy_node()
            for node in self.plugins:
                node.destroy_node()

    def wrapper_callback(self):
        """Step the simulation once per timer tick, unless it is paused."""
        if not self.connected_to_physics_server:
            self.pb.disconnect()
            return
        if not self.pause_simulation:
            self.pb.stepSimulation()

    def get_properties(self):
        """
        construct 4 dictionaries:
        - joint index to joint name x3 (revolute, prismatic and fixed joints)
        - link name to link index dictionary
        """
        rev_joint_index_name_dic = {}
        fixed_joint_index_name_dic = {}
        prismatic_joint_index_name_dic = {}
        link_names_to_ids_dic = {}
        for joint_index in range(0, self.pb.getNumJoints(self.robot)):
            info = self.pb.getJointInfo(self.robot, joint_index)
            # build a dictionary of link names to ids
            link_names_to_ids_dic[info[12].decode('utf-8')] = joint_index
            # ensure we are dealing with a revolute joint
            if info[2] == self.pb.JOINT_REVOLUTE:
                # insert key, value in dictionary (joint index, joint name)
                rev_joint_index_name_dic[joint_index] = info[1].decode('utf-8')  # info[1] refers to joint name
            elif info[2] == self.pb.JOINT_FIXED:
                # insert key, value in dictionary (joint index, joint name)
                fixed_joint_index_name_dic[joint_index] = info[1].decode('utf-8')  # info[1] refers to joint name
            elif info[2] == self.pb.JOINT_PRISMATIC:
                prismatic_joint_index_name_dic[joint_index] = info[1].decode('utf-8')  # info[1] refers to joint name
        return rev_joint_index_name_dic, prismatic_joint_index_name_dic, fixed_joint_index_name_dic, link_names_to_ids_dic

    def start_gui(self, gui=True):
        """start physics engine (client) with or without gui"""
        if gui:
            # start simulation with gui
            self.get_logger().info('Running pybullet with gui')
            self.get_logger().info('-------------------------')
            gui_options = self.get_parameter('gui_options').value  # e.g.
            # to maximize screen: options="--width=2560 --height=1440"
            return self.pb.connect(self.pb.GUI, options=gui_options)
        else:
            # start simulation without gui (non-graphical version)
            self.get_logger().info('Running pybullet without gui')
            # hide console output from pybullet
            self.get_logger().info('-------------------------')
            return self.pb.connect(self.pb.DIRECT)

    def init_pybullet_robot(self):
        """load robot URDF model, set gravity, ground plane and environment"""
        # get from param server the path to the URDF robot model to load at startup
        urdf_path = self.get_parameter('robot_urdf_path').value
        if urdf_path is None:
            self.get_logger().warn('mandatory param robot_urdf_path not set, will exit now')
            rclpy.shutdown()
            return None
        # test urdf file existence
        if not os.path.isfile(urdf_path):
            self.get_logger().error('param robot_urdf_path is set, but file does not exist : ' + urdf_path)
            rclpy.shutdown()
            return None
        # ensure urdf is not xacro, but if it is then make urdf file version out of it
        if 'xacro' in urdf_path:
            # remove xacro from name
            urdf_path_without_xacro = urdf_path[0:urdf_path.find('.xacro')] + urdf_path[urdf_path.find('.xacro') + len('.xacro'):]
            os.system(f'xacro {urdf_path} -o {urdf_path_without_xacro}')
            urdf_path = urdf_path_without_xacro
        # get robot spawn pose from parameter server
        robot_pose_x = self.get_parameter('robot_pose_x').value
        robot_pose_y = self.get_parameter('robot_pose_y').value
        robot_pose_z = self.get_parameter('robot_pose_z').value
        robot_pose_yaw = self.get_parameter('robot_pose_yaw').value
        robot_spawn_orientation = self.pb.getQuaternionFromEuler([0.0, 0.0, robot_pose_yaw])
        fixed_base = self.get_parameter('fixed_base').value
        # load robot from URDF model
        # user decides if inertia is computed automatically by pybullet or custom
        if self.get_parameter('use_inertia_from_file').value:
            # combining several boolean flags using "or" according to pybullet documentation
            urdf_flags = self.pb.URDF_USE_INERTIA_FROM_FILE | self.pb.URDF_USE_SELF_COLLISION
        else:
            urdf_flags = self.pb.URDF_USE_SELF_COLLISION
        # load environment
        self.get_logger().info('loading environment')
        self.environment.load_environment()
        # set no realtime simulation, NOTE: no need to stepSimulation if setRealTimeSimulation is set to 1
        self.pb.setRealTimeSimulation(0)  # NOTE: does not currently work with effort controller, that's why it is left as 0
        self.get_logger().info('loading urdf model: ' + urdf_path)
        # NOTE: self collision enabled by default
        return self.pb.loadURDF(urdf_path, basePosition=[robot_pose_x, robot_pose_y, robot_pose_z],
                                baseOrientation=robot_spawn_orientation,
                                useFixedBase=fixed_base, flags=urdf_flags)

    def handle_reset_simulation(self, req):
        """Callback to handle the service offered by this node to reset the simulation"""
        self.get_logger().info('resetting simulation now')
        # pause simulation to prevent reading joint values with an empty world
        self.pause_simulation = True
        # remove all objects from the world and reset the world to initial conditions
        self.pb.resetSimulation()
        # load URDF model again, set gravity and floor
        self.init_pybullet_robot()
        # resume simulation control cycle now that a new robot is in place
        self.pause_simulation = False
        return []

    def handle_pause_physics(self, req):
        """pause simulation, raise flag to prevent pybullet to execute self.pb.stepSimulation()"""
        self.get_logger().info('pausing simulation')
        self.pause_simulation = True  # raise the flag, as the docstring says
        return []

    def handle_unpause_physics(self, req):
        """unpause simulation, lower flag to allow pybullet to execute self.pb.stepSimulation()"""
        self.get_logger().info('unpausing simulation')
self.pause_simulation = True return [] def pause_simulation_function(self): return self.pause_simulation def main(): try: rclpy.init() pyBulletRosWrapper() finally: rclpy.shutdown() if __name__ == '__main__': main()
48.160173
139
0.65582
[ "MIT" ]
packbionics/pybullet_ros
pybullet_ros/pybullet_ros_wrapper.py
11,125
Python
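A minimal sketch of the pause/step pattern implemented by the wrapper above (pybullet_ros/pybullet_ros_wrapper.py): the physics loop calls stepSimulation only while the pause flag raised by handle_pause_physics is down. The module-level flag is a hypothetical stand-in for the node attribute; only documented pybullet calls are used.

# sketch: headless pybullet stepping gated by a pause flag (stand-in for
# the wrapper's self.pause_simulation attribute)
import time
import pybullet as pb

pause_simulation = False          # hypothetical stand-in flag for this sketch

client = pb.connect(pb.DIRECT)    # non-graphical physics server
pb.setGravity(0, 0, -9.81)
pb.setRealTimeSimulation(0)       # step manually, as the wrapper does

for _ in range(240):              # roughly one simulated second at 240 Hz
    if not pause_simulation:
        pb.stepSimulation()
    time.sleep(1.0 / 240.0)

pb.disconnect()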
# Given a binary search tree (BST), find the lowest common ancestor (LCA) of two given nodes in the BST.
#
# According to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).”
#
# Example 1:
#
# Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
# Output: 6
# Explanation: The LCA of nodes 2 and 8 is 6.
#
# Example 2:
#
# Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
# Output: 2
# Explanation: The LCA of nodes 2 and 4 is 2, since a node can be a descendant of itself according to the LCA definition.
#
# Example 3:
#
# Input: root = [2,1], p = 2, q = 1
# Output: 2
#
# Constraints:
#
# The number of nodes in the tree is in the range [2, 10^5].
# -10^9 <= Node.val <= 10^9
# All Node.val are unique.
# p != q
# p and q will exist in the BST.
#


# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        # binary search tree: if both values are smaller than the root, the LCA
        # is in the left subtree; if both are larger, in the right subtree;
        # otherwise the current node is the split point and hence the LCA.
        if p.val < root.val and q.val < root.val:
            return self.lowestCommonAncestor(root.left, p, q)
        if p.val > root.val and q.val > root.val:
            return self.lowestCommonAncestor(root.right, p, q)
        return root
26.245614
231
0.620321
[ "MIT" ]
chyidl/leetcode
0235-lowest-common-ancestor-of-a-binary-search-tree/lowest-common-ancestor-of-a-binary-search-tree.py
1,512
Python
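Because the problem guarantees that both p and q exist in the BST, the recursive solution above can also be written iteratively in constant space; a small self-contained sketch with its own TreeNode and a tiny usage check follows.

# iterative variant of the BST lowest-common-ancestor search above
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def lowest_common_ancestor(root, p, q):
    node = root
    while node:
        if p.val < node.val and q.val < node.val:
            node = node.left          # both targets are in the left subtree
        elif p.val > node.val and q.val > node.val:
            node = node.right         # both targets are in the right subtree
        else:
            return node               # paths to p and q split here: the LCA

# usage check on the tree 6 -> (2, 8): LCA of 2 and 8 is 6
root = TreeNode(6)
root.left, root.right = TreeNode(2), TreeNode(8)
assert lowest_common_ancestor(root, root.left, root.right).val == 6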
import numpy as np


class Dense():
    def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', input_shape=None):
        self._units = units
        self._activation = activation
        self._use_bias = use_bias
        self._kernel_initializer = kernel_initializer
        self._bias_initializer = bias_initializer
        self._bias = np.zeros((units, 1))

    def setPrevUnits(self, units):
        self._prevUnits = units
        # small random initialization, shape: (output units, input units)
        self._weights = np.random.standard_normal(
            size=(self._units, units)) * 0.01

    def forward(self, arr):
        out = self._weights.dot(arr) + self._bias
        if self._activation == "relu":
            out[out <= 0] = 0
        if self._activation == "softmax":
            out = self.softmax(out)
        return out

    def backwardFirst(self, dout, z):
        dw = dout.dot(z.T)
        db = np.sum(dout, axis=1)
        db = np.reshape(db, (db.shape[0], 1))
        return dw, db

    def backward(self, dout, next_weights, flat, z):
        dz = next_weights.T.dot(dout)
        if (self._activation == "relu"):
            dz[z <= 0] = 0
        dw = dz.dot(flat.T)
        db = np.sum(dz, axis=1).reshape(self._bias.shape)
        return dw, db, dz

    def softmax(self, X):
        # subtract the max before exponentiating for numerical stability
        out = np.exp(X - np.max(X))
        return out / np.sum(out)
31.4
143
0.587403
[ "MIT" ]
GreatGameDota/CNN-Numpy-1D-Images
CNN/Dense.py
1,413
Python
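A usage sketch for the Dense layer above (CNN/Dense.py): the layer multiplies a (units, prev_units) weight matrix by a column-major input, so a 4-feature sample is passed as a (4, 1) array. The layer sizes here are arbitrary example values.

# forward pass through a 4 -> 8 -> 3 stack of the Dense layers defined above
import numpy as np

hidden = Dense(8, activation="relu")
hidden.setPrevUnits(4)                 # allocates an (8, 4) weight matrix
output = Dense(3, activation="softmax")
output.setPrevUnits(8)                 # allocates a (3, 8) weight matrix

x = np.random.standard_normal((4, 1))  # one sample, features along axis 0
probs = output.forward(hidden.forward(x))
assert probs.shape == (3, 1)
assert np.isclose(probs.sum(), 1.0)    # softmax output sums to one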
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Union from .. import utilities, tables class AnalyticsConfiguration(pulumi.CustomResource): bucket: pulumi.Output[str] """ The name of the bucket this analytics configuration is associated with. """ filter: pulumi.Output[dict] """ Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). * `prefix` (`str`) - Object prefix for filtering. * `tags` (`dict`) - Set of object tags for filtering. """ name: pulumi.Output[str] """ Unique identifier of the analytics configuration for the bucket. """ storage_class_analysis: pulumi.Output[dict] """ Configuration for the analytics data export (documented below). * `dataExport` (`dict`) - Data export configuration (documented below). * `destination` (`dict`) - Specifies the destination for the exported analytics data (documented below). * `s3BucketDestination` (`dict`) - Analytics data export currently only supports an S3 bucket destination (documented below). * `bucketAccountId` (`str`) - The account ID that owns the destination bucket. * `bucketArn` (`str`) - The ARN of the destination bucket. * `format` (`str`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`. * `prefix` (`str`) - Object prefix for filtering. * `outputSchemaVersion` (`str`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`. """ def __init__(__self__, resource_name, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None, __props__=None, __name__=None, __opts__=None): """ Provides a S3 bucket [analytics configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) resource. ## Example Usage ### Add analytics configuration for entire S3 bucket and export results to a second S3 bucket ```python import pulumi import pulumi_aws as aws example = aws.s3.Bucket("example") analytics = aws.s3.Bucket("analytics") example_entire_bucket = aws.s3.AnalyticsConfiguration("example-entire-bucket", bucket=example.bucket, storage_class_analysis={ "dataExport": { "destination": { "s3BucketDestination": { "bucketArn": analytics.arn, }, }, }, }) ``` ### Add analytics configuration with S3 bucket object filter ```python import pulumi import pulumi_aws as aws example = aws.s3.Bucket("example") example_filtered = aws.s3.AnalyticsConfiguration("example-filtered", bucket=example.bucket, filter={ "prefix": "documents/", "tags": { "priority": "high", "class": "blue", }, }) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with. :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket. :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below). The **filter** object supports the following: * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering. * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering. 
The **storage_class_analysis** object supports the following: * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below). * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below). * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below). * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket. * `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket. * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`. * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering. * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if bucket is None: raise TypeError("Missing required property 'bucket'") __props__['bucket'] = bucket __props__['filter'] = filter __props__['name'] = name __props__['storage_class_analysis'] = storage_class_analysis super(AnalyticsConfiguration, __self__).__init__( 'aws:s3/analyticsConfiguration:AnalyticsConfiguration', resource_name, __props__, opts) @staticmethod def get(resource_name, id, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None): """ Get an existing AnalyticsConfiguration resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with. :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket. :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below). The **filter** object supports the following: * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering. * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering. The **storage_class_analysis** object supports the following: * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below). * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below). * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below). * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket. 
* `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket. * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`. * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering. * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["bucket"] = bucket __props__["filter"] = filter __props__["name"] = name __props__["storage_class_analysis"] = storage_class_analysis return AnalyticsConfiguration(resource_name, opts=opts, __props__=__props__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
51.11413
165
0.645933
[ "ECL-2.0", "Apache-2.0" ]
michael-golden/pulumi-aws
sdk/python/pulumi_aws/s3/analytics_configuration.py
9,405
Python
import asyncio

import irsdk

from asyncio import Queue

from TelemetryDataUtils import getInfo, getGeneral
from models import typed_message_pb2
from models.State import State


class TelemetryLogger:

    def __init__(self, receiver_queue: Queue, pushable_queue: Queue, streaming_queue: Queue):
        self.state = State()
        self.ir = irsdk.IRSDK()
        self.should_run = False
        self.receiver_queue = receiver_queue
        self.pushable_queue = pushable_queue
        self.streaming_queue = streaming_queue
        self.sentGeneralData = False

    async def run(self):
        # TODO: Fix this
        while True:
            self.should_run = await self.receiver_queue.get()
            if self.should_run:
                break

        while self.should_run:
            self.is_sim_running()
            if self.state.ir_connected:
                await self.get_iracing_data()
            await asyncio.sleep(5)
            # await asyncio.sleep(0.1)
            # TODO: Figure out how to shut off if told to

    def is_sim_running(self):
        if self.state.ir_connected and not (self.ir.is_initialized and self.ir.is_connected):
            self.state.ir_connected = False
            # don't forget to reset your State variables
            self.state.last_car_setup_tick = -1
            # we are shutting down ir library (clearing all internal variables)
            self.ir.shutdown()
            print("irsdk disconnected")
        elif not self.state.ir_connected and self.ir.startup(
                test_file="./data/data.bin") and self.ir.is_initialized and self.ir.is_connected:
            self.state.ir_connected = True
            print("irsdk connected")

    async def get_iracing_data(self):
        # data per tick since data can change midway
        self.ir.freeze_var_buffer_latest()

        streamable: typed_message_pb2.TypedMessage = typed_message_pb2.TypedMessage()
        streamable.type = typed_message_pb2.TypedMessage.Type.LOGGER_STREAM
        streamable.message.Pack(getInfo(ir=self.ir))

        pushable: typed_message_pb2.TypedMessage = typed_message_pb2.TypedMessage()
        pushable.type = typed_message_pb2.TypedMessage.Type.LOGGER_UPDATE
        pushable.message.Pack(getGeneral(self.ir))

        if not self.sentGeneralData:
            await self.streaming_queue.put(pushable.SerializeToString())
            self.sentGeneralData = True

        await self.streaming_queue.put(streamable.SerializeToString())
37.188406
97
0.669914
[ "MIT" ]
LandonPatmore/iracing-live-telemetry
logger/TelemetryLogger.py
2,566
Python
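The run coroutine above blocks on receiver_queue until it sees a truthy start signal; this gate pattern can be reproduced with nothing but the standard library, as in the following sketch (queue and function names are the sketch's own).

# sketch of the start-signal gate used by TelemetryLogger.run
import asyncio

async def worker(control: asyncio.Queue, out: asyncio.Queue):
    while not await control.get():     # block until a truthy "go" arrives
        pass
    for tick in range(3):              # stand-in for the telemetry poll loop
        await out.put(f"sample-{tick}")
        await asyncio.sleep(0.01)

async def main():
    control, out = asyncio.Queue(), asyncio.Queue()
    task = asyncio.create_task(worker(control, out))
    await control.put(True)            # release the gate
    await task
    print([out.get_nowait() for _ in range(out.qsize())])

asyncio.run(main())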
from nbconvert.writers.base import WriterBase class HelloWriter(WriterBase): def write(self, output, resources, notebook_name=None, **kw): with open("hello.txt", "w") as outfile: outfile.write("hello world")
29.25
65
0.683761
[ "BSD-3-Clause" ]
CMU-IDS-2022/final-project-the-evaluators
venv/lib/python3.9/site-packages/nbconvert/tests/files/hello.py
234
Python
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

with open("README.md") as f:
    readme = f.read()

classifiers = [
    'Intended Audience :: Developers',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
]

setup(
    name="tinypandas",
    version="0.0.1",
    description="A small pure python library with a Pandas-like API",
    long_description=readme,
    packages=['tinypandas', 'tinypandas.tests'],
    package_dir={'tinypandas': 'src', 'tinypandas.tests': 'tests'},
    install_requires=[],
    author="@lexual, Dilawar Singh <[email protected]>",
    maintainer="Dilawar Singh",
    maintainer_email="[email protected]",
    url="http://github.com/dilawar/",
    license='GPL?',
    classifiers=classifiers,
)
26.6875
73
0.651054
[ "BSD-3-Clause" ]
dilawar/tinypandas
setup.py
854
Python
import os
import shutil

from git import Repo, RemoteProgress
from tqdm import tqdm

dir_path = os.path.join(os.path.expanduser('~/Documents'), 'server')
os.chdir(dir_path)
gitaddress = "https://github.com/0xol/server"

print("what server version would you like to install")
print("format is 'client-version'")
print("example 'forge-1.16.5' or 'vanilla-1.7.10'")
print("for a list of supported server versions check https://github.com/0xol/server under branches")
branch = input()

os.system("del /F /S /Q /A .git")
os.system("del /F /S /Q /A .git")  # just in case the program didn't kill it the first time

folder = dir_path
for filename in os.listdir(folder):
    file_path = os.path.join(folder, filename)
    try:
        if os.path.isfile(file_path) or os.path.islink(file_path):
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            shutil.rmtree(file_path)
    except Exception as e:
        print('Failed to delete %s. Reason: %s' % (file_path, e))


class CloneProgress(RemoteProgress):
    def __init__(self):
        super().__init__()
        self.pbar = tqdm()

    def update(self, op_code, cur_count, max_count=None, message=''):
        self.pbar.total = max_count
        self.pbar.n = cur_count
        self.pbar.refresh()


print(dir_path)
Repo.clone_from(gitaddress, dir_path, branch=branch, progress=CloneProgress())
28.283019
108
0.695797
[ "MIT" ]
0xol/server-installer
main.py
1,499
Python
############################################################################### # # Tests for XlsxWriter. # # SPDX-License-Identifier: BSD-2-Clause # Copyright (c), 2013-2021, John McNamara, [email protected] # from ..excel_comparison_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('chart_scatter03.xlsx') def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'}) chart.axis_ids = [54010624, 45705856] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column('A1', data[0]) worksheet.write_column('B1', data[1]) worksheet.write_column('C1', data[2]) chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$B$1:$B$5'}) chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$C$1:$C$5', }) worksheet.insert_chart('E9', chart) workbook.close() self.assertExcelEqual()
26.454545
79
0.538832
[ "BSD-2-Clause" ]
CrackerCat/XlsxWriter
xlsxwriter/test/comparison/test_chart_scatter03.py
1,455
Python
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Algorithmic data generators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import numpy as np from six.moves import range # pylint: disable=redefined-builtin from tensor2tensor.data_generators import generator_utils as utils from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import text_encoder from tensor2tensor.layers import modalities from tensor2tensor.utils import metrics from tensor2tensor.utils import registry import tensorflow as tf class AlgorithmicProblem(problem.Problem): """Base class for algorithmic problems.""" @property def num_symbols(self): raise NotImplementedError() def generator(self, nbr_symbols, max_length, nbr_cases): """Generates the data.""" raise NotImplementedError() @property def train_length(self): return 40 @property def dev_length(self): return 400 @property def train_size(self): return 100000 @property def dev_size(self): return 10000 @property def num_shards(self): return 10 def generate_data(self, data_dir, _, task_id=-1): def generator_eos(nbr_symbols, max_length, nbr_cases): """Shift by NUM_RESERVED_IDS and append EOS token.""" for case in self.generator(nbr_symbols, max_length, nbr_cases): new_case = {} for feature in case: new_case[feature] = [ i + text_encoder.NUM_RESERVED_TOKENS for i in case[feature] ] + [text_encoder.EOS_ID] yield new_case utils.generate_dataset_and_shuffle( generator_eos(self.num_symbols, self.train_length, self.train_size), self.training_filepaths(data_dir, self.num_shards, shuffled=True), generator_eos(self.num_symbols, self.dev_length, self.dev_size), self.dev_filepaths(data_dir, 1, shuffled=True), shuffle=False) def hparams(self, defaults, unused_model_hparams): p = defaults vocab_size = self.num_symbols + text_encoder.NUM_RESERVED_TOKENS p.modality = {"inputs": modalities.ModalityType.SYMBOL, "targets": modalities.ModalityType.SYMBOL} p.vocab_size = {"inputs": vocab_size, "targets": vocab_size} p.input_space_id = problem.SpaceID.DIGIT_0 p.target_space_id = problem.SpaceID.DIGIT_1 @registry.register_problem class AlgorithmicIdentityBinary40(AlgorithmicProblem): """Problem spec for algorithmic binary identity task.""" @property def num_symbols(self): return 2 def generator(self, nbr_symbols, max_length, nbr_cases): """Generator for the identity (copy) task on sequences of symbols. The length of the sequence is drawn uniformly at random from [1, max_length] and then symbols are drawn uniformly at random from [0, nbr_symbols) until nbr_cases sequences have been produced. Args: nbr_symbols: number of symbols to use in each sequence. max_length: integer, maximum length of sequences to generate. nbr_cases: the number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list and target-list are the same. 
""" for _ in range(nbr_cases): l = np.random.randint(max_length) + 1 inputs = [np.random.randint(nbr_symbols) for _ in range(l)] yield {"inputs": inputs, "targets": inputs} @registry.register_problem class AlgorithmicIdentityDecimal40(AlgorithmicIdentityBinary40): """Problem spec for algorithmic decimal identity task.""" @property def num_symbols(self): return 10 @registry.register_problem class AlgorithmicShiftDecimal40(AlgorithmicProblem): """Problem spec for algorithmic decimal shift task.""" @property def num_symbols(self): return 20 def generator(self, nbr_symbols, max_length, nbr_cases): """Generator for the shift task on sequences of symbols. The length of the sequence is drawn uniformly at random from [1, max_length] and then symbols are drawn uniformly at random from [0, nbr_symbols - shift] until nbr_cases sequences have been produced (output[i] = input[i] + shift). Args: nbr_symbols: number of symbols to use in each sequence (input + output). max_length: integer, maximum length of sequences to generate. nbr_cases: the number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where target-list[i] = input-list[i] + shift. """ shift = 10 for _ in range(nbr_cases): l = np.random.randint(max_length) + 1 inputs = [np.random.randint(nbr_symbols - shift) for _ in range(l)] yield {"inputs": inputs, "targets": [i + shift for i in inputs]} @property def dev_length(self): return 80 @registry.register_problem class AlgorithmicReverseBinary40(AlgorithmicProblem): """Problem spec for algorithmic binary reversing task.""" @property def num_symbols(self): return 2 def generator(self, nbr_symbols, max_length, nbr_cases): """Generator for the reversing task on sequences of symbols. The length of the sequence is drawn uniformly at random from [1, max_length] and then symbols are drawn uniformly at random from [0, nbr_symbols) until nbr_cases sequences have been produced. Args: nbr_symbols: number of symbols to use in each sequence. max_length: integer, maximum length of sequences to generate. nbr_cases: the number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where target-list is input-list reversed. """ for _ in range(nbr_cases): l = np.random.randint(max_length) + 1 inputs = [np.random.randint(nbr_symbols) for _ in range(l)] yield {"inputs": inputs, "targets": list(reversed(inputs))} @registry.register_problem class AlgorithmicReverseDecimal40(AlgorithmicReverseBinary40): """Problem spec for algorithmic decimal reversing task.""" @property def num_symbols(self): return 10 def zipf_distribution(nbr_symbols, alpha): """Helper function: Create a Zipf distribution. Args: nbr_symbols: number of symbols to use in the distribution. alpha: float, Zipf's Law Distribution parameter. Default = 1.5. Usually for modelling natural text distribution is in the range [1.1-1.6]. Returns: distr_map: list of float, Zipf's distribution over nbr_symbols. """ tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha) zeta = np.r_[0.0, np.cumsum(tmp)] return [x / zeta[-1] for x in zeta] def zipf_random_sample(distr_map, sample_len): """Helper function: Generate a random Zipf sample of given length. Args: distr_map: list of float, Zipf's distribution over nbr_symbols. sample_len: integer, length of sequence to generate. Returns: sample: list of integer, Zipf's random sample over nbr_symbols. 
""" u = np.random.random(sample_len) # Random produces values in range [0.0,1.0); even if it is almost # improbable(but possible) that it can generate a clear 0.000..0. return list(np.searchsorted(distr_map, u)) def reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, scale_std_dev=100, alpha=1.5): """Generator for the reversing nlp-like task on sequences of symbols. The length of the sequence is drawn from a Gaussian(Normal) distribution at random from [1, max_length] and with std deviation of 1%, then symbols are drawn from Zipf's law at random from [0, nbr_symbols) until nbr_cases sequences have been produced. Args: nbr_symbols: integer, number of symbols. max_length: integer, maximum length of sequences to generate. nbr_cases: the number of cases to generate. scale_std_dev: float, Normal distribution's standard deviation scale factor used to draw the length of sequence. Default = 1% of the max_length. alpha: float, Zipf's Law Distribution parameter. Default = 1.5. Usually for modelling natural text distribution is in the range [1.1-1.6]. Yields: A dictionary {"inputs": input-list, "targets": target-list} where target-list is input-list reversed. """ std_dev = max_length / scale_std_dev distr_map = zipf_distribution(nbr_symbols, alpha) for _ in range(nbr_cases): l = int(abs(np.random.normal(loc=max_length / 2, scale=std_dev)) + 1) inputs = zipf_random_sample(distr_map, l) yield {"inputs": inputs, "targets": list(reversed(inputs))} @registry.register_problem class AlgorithmicReverseNlplike8k(AlgorithmicProblem): """Problem spec for algorithmic nlp-like reversing task.""" @property def num_symbols(self): return 8000 def generator(self, nbr_symbols, max_length, nbr_cases): return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10, 1.300) @property def train_length(self): return 70 @property def dev_length(self): return 70 @registry.register_problem class AlgorithmicReverseNlplike32k(AlgorithmicReverseNlplike8k): """Problem spec for algorithmic nlp-like reversing task, 32k vocab.""" @property def num_symbols(self): return 32000 def generator(self, nbr_symbols, max_length, nbr_cases): return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10, 1.050) def lower_endian_to_number(l, base): """Helper function: convert a list of digits in the given base to a number.""" return sum([d * (base**i) for i, d in enumerate(l)]) def number_to_lower_endian(n, base): """Helper function: convert a number to a list of digits in the given base.""" if n < base: return [n] return [n % base] + number_to_lower_endian(n // base, base) def random_number_lower_endian(length, base): """Helper function: generate a random number as a lower-endian digits list.""" if length == 1: # Last digit can be 0 only if length is 1. return [np.random.randint(base)] prefix = [np.random.randint(base) for _ in range(length - 1)] return prefix + [np.random.randint(base - 1) + 1] # Last digit is not 0. @registry.register_problem class AlgorithmicAdditionBinary40(AlgorithmicProblem): """Problem spec for algorithmic binary addition task.""" @property def num_symbols(self): return 2 def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ """Generator for the addition task. The length of each number is drawn uniformly at random in [1, max_length/2] and then digits are drawn uniformly at random. The numbers are added and separated by [base] in the input. Stops at nbr_cases. Args: base: in which base are the numbers. max_length: integer, maximum length of sequences to generate. 
nbr_cases: the number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list are the 2 numbers and target-list is the result of adding them. Raises: ValueError: if max_length is lower than 3. """ if max_length < 3: raise ValueError("Maximum length must be at least 3.") for _ in range(nbr_cases): l1 = np.random.randint(max_length // 2) + 1 l2 = np.random.randint(max_length - l1 - 1) + 1 n1 = random_number_lower_endian(l1, base) n2 = random_number_lower_endian(l2, base) result = lower_endian_to_number(n1, base) + lower_endian_to_number( n2, base) inputs = n1 + [base] + n2 targets = number_to_lower_endian(result, base) yield {"inputs": inputs, "targets": targets} @registry.register_problem class AlgorithmicAdditionDecimal40(AlgorithmicAdditionBinary40): """Problem spec for algorithmic decimal addition task.""" @property def num_symbols(self): return 10 @registry.register_problem class AlgorithmicMultiplicationBinary40(AlgorithmicProblem): """Problem spec for algorithmic binary multiplication task.""" @property def num_symbols(self): return 2 def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ """Generator for the multiplication task. The length of each number is drawn uniformly at random in [1, max_length/2] and then digits are drawn uniformly at random. The numbers are multiplied and separated by [base] in the input. Stops at nbr_cases. Args: base: in which base are the numbers. max_length: integer, maximum length of sequences to generate. nbr_cases: the number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list are the 2 numbers and target-list is the result of multiplying them. Raises: ValueError: if max_length is lower than 3. """ if max_length < 3: raise ValueError("Maximum length must be at least 3.") for _ in range(nbr_cases): l1 = np.random.randint(max_length // 2) + 1 l2 = np.random.randint(max_length - l1 - 1) + 1 n1 = random_number_lower_endian(l1, base) n2 = random_number_lower_endian(l2, base) result = lower_endian_to_number(n1, base) * lower_endian_to_number( n2, base) inputs = n1 + [base] + n2 targets = number_to_lower_endian(result, base) yield {"inputs": inputs, "targets": targets} @registry.register_problem class AlgorithmicMultiplicationDecimal40(AlgorithmicMultiplicationBinary40): """Problem spec for algorithmic decimal multiplication task.""" @property def num_symbols(self): return 10 @registry.register_problem class AlgorithmicReverseBinary40Test(AlgorithmicReverseBinary40): """Test Problem with tiny dataset.""" @property def train_length(self): return 10 @property def dev_length(self): return 10 @property def train_size(self): return 1000 @property def dev_size(self): return 100 @property def num_shards(self): return 1 @registry.register_problem class AlgorithmicSortProblem(AlgorithmicProblem): """Problem spec for sorting numbers.""" @property def num_symbols(self): return max(self.train_length, self.dev_length) @property def train_length(self): return 10 @property def dev_length(self): return self.train_length * 2 @property def unique(self): """Unique numbers wo/ replacement or w/ replacement in sorting task.""" return False def generator(self, nbr_symbols, max_length, nbr_cases): """Generating for sorting task on sequence of symbols. 
The length of the sequence is drawn uniformly at random from [1, max_length]
    and then symbols are drawn (uniquely w/ or w/o replacement) uniformly at
    random from [0, nbr_symbols) until nbr_cases sequences have been produced.

    Args:
      nbr_symbols: number of symbols to use in each sequence.
      max_length: integer, maximum length of sequences to generate.
      nbr_cases: the number of cases to generate.

    Yields:
      A dictionary {"inputs": input-list, "targets": target-list} where
      target-list is input-list sorted.
    """
    for _ in range(nbr_cases):
      # Sample the sequence length.
      length = np.random.randint(max_length) + 1

      if self.unique:
        # Sample our inputs w/o replacement.
        inputs = np.arange(nbr_symbols)
        np.random.shuffle(inputs)

        # Truncate to the desired length.
        inputs = inputs[:length]
        inputs = list(inputs)
      else:
        inputs = list(np.random.randint(nbr_symbols, size=length))

      # Targets are simply the sorted inputs.
      targets = list(sorted(inputs))

      yield {"inputs": inputs, "targets": targets}

  def eval_metrics(self):
    defaults = super(AlgorithmicSortProblem, self).eval_metrics()
    return defaults + [metrics.Metrics.EDIT_DISTANCE]


@registry.register_problem
class TinyAlgo(AlgorithmicIdentityBinary40):
  """A small algorithmic problem for testing."""

  def generate_data(self, data_dir, tmp_dir, task_id=-1):
    """Generate data for this problem."""

    del tmp_dir, task_id
    identity_problem = AlgorithmicIdentityBinary40()
    utils.generate_files(
        identity_problem.generator(self.num_symbols, 40, 100000),
        self.training_filepaths(data_dir, 1, shuffled=True), 100)
    utils.generate_files(
        identity_problem.generator(self.num_symbols, 400, 10000),
        self.dev_filepaths(data_dir, 1, shuffled=True), 100)

  @classmethod
  def setup_for_test(cls):
    """Setup directories and files required to run the problem."""

    tmp_dir = tf.test.get_temp_dir()
    shutil.rmtree(tmp_dir)
    os.mkdir(tmp_dir)
    cls.data_dir = tmp_dir

    # Generate a small test dataset
    cls().generate_data(TinyAlgo.data_dir, None)
32.06044
87
0.70517
[ "Apache-2.0" ]
PedroLelis/tensor2tensor
tensor2tensor/data_generators/algorithmic.py
17,505
Python
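The addition and multiplication problems above encode every number as a lower-endian digit list, with the base value acting as a separator between the two operands. A quick standalone round-trip check of that encoding (the two helpers are restated here so the snippet runs on its own):

# standalone check of the lower-endian digit encoding used above
def lower_endian_to_number(l, base):
  """Convert a list of digits in the given base (LSB first) to a number."""
  return sum(d * (base ** i) for i, d in enumerate(l))

def number_to_lower_endian(n, base):
  """Convert a number to a list of digits in the given base (LSB first)."""
  if n < base:
    return [n]
  return [n % base] + number_to_lower_endian(n // base, base)

assert number_to_lower_endian(123, 10) == [3, 2, 1]
assert lower_endian_to_number([3, 2, 1], 10) == 123

# an addition case as the generator would frame it: inputs = n1 + [base] + n2
n1, n2 = number_to_lower_endian(45, 10), number_to_lower_endian(27, 10)
inputs = n1 + [10] + n2                       # [5, 4, 10, 7, 2]
targets = number_to_lower_endian(45 + 27, 10)
assert targets == [2, 7]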
#!/usr/bin/env python from setuptools import setup, find_packages setup( name='async-gelf-handler', version='0.1.4', description="An async wrapper around the GELF (Graylog Extended Log Format).", long_description=open('README.rst').read(), keywords='logging gelf graylog2 graylog async', author='Developer', author_email='[email protected]', url='https://github.com/listingmirror/async-gelf-handler', license='BSD License', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires=['graypy>=0.2.13.2'], classifiers=['License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3'], )
34.954545
82
0.6671
[ "BSD-3-Clause" ]
listingmirror/async-gelf-handler
setup.py
769
Python
from flask import Flask app = Flask(__name__, static_url_path='', static_folder='static') app.config['DEBUG'] = True @app.route('/') def root(): # Note: this is probably handled by the app engine static file handler. return app.send_static_file('index.html') @app.errorhandler(404) def page_not_found(e): """Return a custom 404 error.""" return 'Sorry, nothing at this URL.', 404
28.142857
73
0.71066
[ "Apache-2.0" ]
rekab/papt
main.py
394
Python
import random
import math
import numpy as np
from typing import List


class EpsilonGreedy:
    def __init__(self, epsilon: float, counts: List[int], values: List[float]):
        assert epsilon is None or 0.0 <= epsilon <= 1.0
        self.epsilon = epsilon
        self.counts = counts
        self.values = values

    def initialize(self, n_arms):
        self.counts = [0] * n_arms
        self.values = [0.0] * n_arms

    def select_arm(self):
        epsilon = self.epsilon
        if epsilon is None:
            t = sum(self.counts) + 1
            epsilon = 1 / math.log(t + 0.0000001)

        # exploitation
        if random.random() > epsilon:
            return np.argmax(self.values)
        # exploration
        else:
            return random.randrange(len(self.values))

    def update(self, chosen_arm, reward):
        self.counts[chosen_arm] += 1

        n = self.counts[chosen_arm]
        value = self.values[chosen_arm]
        self.values[chosen_arm] = ((n - 1) / float(n)) * value + (1 / float(n)) * reward  # online average

    def __str__(self):
        return "EpsilonGreedy(epsilon={0})".format(self.epsilon)
28.175
108
0.584738
[ "MIT" ]
MitI-7/MachineLearning
ReinforcementLearning/Bandit/EpsilonGreedy.py
1,135
Python
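A quick simulation sketch for the EpsilonGreedy agent above: two Bernoulli arms with made-up success rates 0.3 and 0.7. After a few thousand pulls the online-average values should approach the true rates and the pull counts should favor arm 1.

# simulate the epsilon-greedy agent on two Bernoulli arms
import random

agent = EpsilonGreedy(epsilon=0.1, counts=[], values=[])
agent.initialize(n_arms=2)
true_rates = [0.3, 0.7]              # hypothetical arm reward probabilities

for _ in range(5000):
    arm = agent.select_arm()
    reward = 1.0 if random.random() < true_rates[arm] else 0.0
    agent.update(arm, reward)

print(agent.counts)                         # pulls should favor arm 1
print([round(v, 2) for v in agent.values])  # estimates near 0.3 and 0.7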
""" Generating data from the CarRacing gym environment. !!! DOES NOT WORK ON TITANIC, DO IT AT HOME, THEN SCP !!! """ import argparse from os.path import join, exists import gym import numpy as np from utils.misc import sample_continuous_policy def generate_data(rollouts, data_dir, noise_type): # pylint: disable=R0914 """ Generates data """ assert exists(data_dir), "The data directory does not exist..." env = gym.make("CarRacing-v0") seq_len = 1000 for i in range(rollouts): env.reset() env.env.viewer.window.dispatch_events() if noise_type == 'white': a_rollout = [env.action_space.sample() for _ in range(seq_len)] elif noise_type == 'brown': a_rollout = sample_continuous_policy(env.action_space, seq_len, 1. / 50) s_rollout = [] r_rollout = [] d_rollout = [] t = 0 while True: action = a_rollout[t] t += 1 # The CarRacing-v0 environment has a step limit of 1000, this can be seen in env.spec.max_episode_steps s, r, done, _ = env.step(action) env.env.viewer.window.dispatch_events() s_rollout += [s] r_rollout += [r] d_rollout += [done] if done: # Because these are random policies, most of them will not be done before the step limit of 1000 print("> End of rollout {}, {} frames...".format(i, len(s_rollout))) np.savez(join(data_dir, 'rollout_{}'.format(i)), observations=np.array(s_rollout), rewards=np.array(r_rollout), actions=np.array(a_rollout), terminals=np.array(d_rollout)) break if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--rollouts', type=int, help="Number of rollouts") parser.add_argument('--dir', type=str, help="Where to place rollouts") parser.add_argument('--policy', type=str, choices=['white', 'brown'], help='Noise type used for action sampling.', default='brown') args = parser.parse_args() generate_data(args.rollouts, args.dir, args.policy)
38
115
0.586404
[ "MIT" ]
susanwe/world-models
data/carracing.py
2,280
Python
""" Entradas Monto de dinero -> int -> a """ a = int ( input ( "Ingrese monto de dinero en COP:" )) b = a billetes_de_100000 = ( b - b % 100000 ) / 100000 b = b % 100000 billetes_de_50000 = ( b - b % 50000 ) / 50000 b = b % 50000 billetes_de_20000 = ( b - b % 20000 ) / 20000 b = b % 20000 billetes_de_10000 = ( b - b % 10000 ) / 10000 b = b % 10000 billetes_de_5000 = ( b - b % 5000 ) / 5000 b = b % 5000 billetes_de_2000 = ( b - b % 2000 ) / 2000 b = b % 2000 billetes_de_1000 = ( b - b % 1000 ) / 1000 b = b % 1000 monedas_de_500 = ( b - b % 500 ) / 500 b = b % 500 monedas_de_200 = ( b - b % 200 ) / 200 b = b % 200 monedas_de_100 = ( b - b % 100 ) / 100 b = b % 100 monedas_de_50 = ( b - b % 50 ) / 50 b = b % 50 print ( "La Cantidad de billetes de 100000 es de:" + str ( billetes_de_100000 )) print ( "La Cantidad de billetes de 50000 es de:" + str ( billetes_de_50000 )) print ( "La Cantidad de billetes de 20000 es de:" + str ( billetes_de_20000 )) print ( "La Cantidad de billetes de 10000 es de:" + str ( billetes_de_10000 )) print ( "La Cantidad de billetes de 5000 es de:" + str ( billetes_de_5000 )) print ( "La Cantidad de billetes de 2000 es de:" + str ( billetes_de_2000 )) print ( "La Cantidad de billetes de 1000 es de:" + str ( billetes_de_1000 )) print ( "La Cantidad de monedas de 500 es de:" + str ( monedas_de_500 )) print ( "La Cantidad de monedas de 200 es de:" + str ( monedas_de_200 )) print ( "La Cantidad de monedas de 100 es de:" + str ( monedas_de_100 )) print ( "La Cantidad de monedas de 50 es de:" + str ( monedas_de_50 ))
39.897436
80
0.63946
[ "MIT" ]
JuanMPerezM/AlgoritmosyProgramacion_Talleres
taller_estructuras_de_control/ejercicio12.py
1,556
Python
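The program above repeats the same peel-off-the-largest-denomination step once per bill and coin; the same computation can be expressed with one divmod per denomination, as in this sketch (prompt text and loop variables are the sketch's own):

# equivalent change-making loop using divmod, largest denomination first
denominations = [100000, 50000, 20000, 10000, 5000, 2000, 1000, 500, 200, 100, 50]

remaining = int(input("Enter the amount of money in COP: "))
for d in denominations:
    count, remaining = divmod(remaining, d)
    kind = "bills" if d >= 1000 else "coins"
    print(f"Number of {kind} of {d}: {count}")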
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import requests import json import datetime url = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid=b10c5ed1-bad1-445f-b386-b919946339a7' r = requests.get(url) service_list = r.json() lurls= [] lips = [] for service in service_list: for url in service.get('urls', []): lurls.append(url.replace('*', '')) for ip in service.get('ips', []): lips.append(ip) warninglist = {} warninglist['name'] = 'List of known Office 365 URLs address ranges' warninglist['version'] = int(datetime.date.today().strftime('%Y%m%d')) warninglist['description'] = 'Office 365 URLs and IP address ranges' warninglist['type'] = 'string' warninglist['list'] = sorted(set(lurls)) warninglist['matching_attributes'] = ["domain", "domain|ip", "hostname"] with open('../lists/microsoft-office365/list.json', 'w') as data_file: json.dump(warninglist, data_file, indent=4, sort_keys=True) warninglist = {} warninglist['name'] = 'List of known Office 365 IP address ranges' warninglist['version'] = int(datetime.date.today().strftime('%Y%m%d')) warninglist['description'] = 'Office 365 URLs and IP address ranges' warninglist['list'] = sorted(set(lips)) warninglist['type'] = 'cidr' warninglist['matching_attributes'] = ["ip-src", "ip-dst", "domain|ip"] with open('../lists/microsoft-office365-ip/list.json', 'w') as data_file: json.dump(warninglist, data_file, indent=4, sort_keys=True)
33.906977
109
0.699588
[ "CC0-1.0" ]
JakubOnderka/misp-warninglists
tools/generate-office365.py
1,458
Python
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = [ 'AcceleratorConfigArgs', 'AccessConfigArgs', 'AdvancedMachineFeaturesArgs', 'AliasIpRangeArgs', 'AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs', 'AllocationSpecificSKUAllocationReservedInstancePropertiesArgs', 'AllocationSpecificSKUReservationArgs', 'AttachedDiskInitializeParamsArgs', 'AttachedDiskArgs', 'AuditConfigArgs', 'AuditLogConfigArgs', 'AuthorizationLoggingOptionsArgs', 'AutoscalingPolicyCpuUtilizationArgs', 'AutoscalingPolicyCustomMetricUtilizationArgs', 'AutoscalingPolicyLoadBalancingUtilizationArgs', 'AutoscalingPolicyScaleDownControlArgs', 'AutoscalingPolicyScaleInControlArgs', 'AutoscalingPolicyArgs', 'BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs', 'BackendBucketCdnPolicyCacheKeyPolicyArgs', 'BackendBucketCdnPolicyNegativeCachingPolicyArgs', 'BackendBucketCdnPolicyArgs', 'BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs', 'BackendServiceCdnPolicyNegativeCachingPolicyArgs', 'BackendServiceCdnPolicyArgs', 'BackendServiceConnectionTrackingPolicyArgs', 'BackendServiceFailoverPolicyArgs', 'BackendServiceIAPOAuth2ClientInfoArgs', 'BackendServiceIAPArgs', 'BackendServiceLogConfigArgs', 'BackendArgs', 'BindingArgs', 'CacheKeyPolicyArgs', 'CallCredentialsArgs', 'ChannelCredentialsArgs', 'CircuitBreakersArgs', 'ConditionArgs', 'ConfidentialInstanceConfigArgs', 'ConnectionDrainingArgs', 'ConsistentHashLoadBalancerSettingsHttpCookieArgs', 'ConsistentHashLoadBalancerSettingsArgs', 'CorsPolicyArgs', 'CustomerEncryptionKeyArgs', 'DeprecationStatusArgs', 'DiskInstantiationConfigArgs', 'DisplayDeviceArgs', 'DistributionPolicyZoneConfigurationArgs', 'DistributionPolicyArgs', 'DurationArgs', 'ExprArgs', 'ExternalVpnGatewayInterfaceArgs', 'FileContentBufferArgs', 'FirewallAllowedItemArgs', 'FirewallDeniedItemArgs', 'FirewallLogConfigArgs', 'FirewallPolicyAssociationArgs', 'FirewallPolicyRuleMatcherLayer4ConfigArgs', 'FirewallPolicyRuleMatcherArgs', 'FirewallPolicyRuleSecureTagArgs', 'FirewallPolicyRuleArgs', 'FixedOrPercentArgs', 'ForwardingRuleServiceDirectoryRegistrationArgs', 'FutureReservationSpecificSKUPropertiesArgs', 'FutureReservationTimeWindowArgs', 'GRPCHealthCheckArgs', 'GrpcServiceConfigArgs', 'GuestOsFeatureArgs', 'HTTP2HealthCheckArgs', 'HTTPHealthCheckArgs', 'HTTPSHealthCheckArgs', 'HealthCheckLogConfigArgs', 'HostRuleArgs', 'HttpFaultAbortArgs', 'HttpFaultDelayArgs', 'HttpFaultInjectionArgs', 'HttpFilterConfigArgs', 'HttpHeaderActionArgs', 'HttpHeaderMatchArgs', 'HttpHeaderOptionArgs', 'HttpQueryParameterMatchArgs', 'HttpRedirectActionArgs', 'HttpRetryPolicyArgs', 'HttpRouteActionArgs', 'HttpRouteRuleMatchArgs', 'HttpRouteRuleArgs', 'ImageRawDiskArgs', 'InitialStateConfigArgs', 'InstanceGroupManagerAllInstancesConfigArgs', 'InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs', 'InstanceGroupManagerAutoHealingPolicyArgs', 'InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs', 'InstanceGroupManagerInstanceLifecyclePolicyArgs', 'InstanceGroupManagerStandbyPolicyArgs', 'InstanceGroupManagerUpdatePolicyArgs', 'InstanceGroupManagerVersionArgs', 'InstanceParamsArgs', 'InstancePropertiesPatchArgs', 'InstancePropertiesArgs', 'Int64RangeMatchArgs', 
'InterconnectAttachmentPartnerMetadataArgs', 'InterconnectMacsecPreSharedKeyArgs', 'InterconnectMacsecArgs', 'LicenseResourceCommitmentArgs', 'LicenseResourceRequirementsArgs', 'LocalDiskArgs', 'LogConfigCloudAuditOptionsArgs', 'LogConfigCounterOptionsCustomFieldArgs', 'LogConfigCounterOptionsArgs', 'LogConfigDataAccessOptionsArgs', 'LogConfigArgs', 'MetadataCredentialsFromPluginArgs', 'MetadataFilterLabelMatchArgs', 'MetadataFilterArgs', 'MetadataItemsItemArgs', 'MetadataArgs', 'NamedPortArgs', 'NetworkEndpointGroupAppEngineArgs', 'NetworkEndpointGroupCloudFunctionArgs', 'NetworkEndpointGroupCloudRunArgs', 'NetworkEndpointGroupServerlessDeploymentArgs', 'NetworkInterfaceSubInterfaceArgs', 'NetworkInterfaceArgs', 'NetworkPerformanceConfigArgs', 'NetworkRoutingConfigArgs', 'NodeGroupAutoscalingPolicyArgs', 'NodeGroupMaintenanceWindowArgs', 'NodeTemplateNodeTypeFlexibilityArgs', 'NotificationEndpointGrpcSettingsArgs', 'OutlierDetectionArgs', 'PacketMirroringFilterArgs', 'PacketMirroringForwardingRuleInfoArgs', 'PacketMirroringMirroredResourceInfoInstanceInfoArgs', 'PacketMirroringMirroredResourceInfoSubnetInfoArgs', 'PacketMirroringMirroredResourceInfoArgs', 'PacketMirroringNetworkInfoArgs', 'PathMatcherArgs', 'PathRuleArgs', 'PublicDelegatedPrefixPublicDelegatedSubPrefixArgs', 'RequestMirrorPolicyArgs', 'ReservationAffinityArgs', 'ReservationArgs', 'ResourceCommitmentArgs', 'ResourcePolicyDailyCycleArgs', 'ResourcePolicyGroupPlacementPolicyArgs', 'ResourcePolicyHourlyCycleArgs', 'ResourcePolicyInstanceSchedulePolicyScheduleArgs', 'ResourcePolicyInstanceSchedulePolicyArgs', 'ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs', 'ResourcePolicySnapshotSchedulePolicyScheduleArgs', 'ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs', 'ResourcePolicySnapshotSchedulePolicyArgs', 'ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs', 'ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs', 'ResourcePolicyVmMaintenancePolicyArgs', 'ResourcePolicyWeeklyCycleDayOfWeekArgs', 'ResourcePolicyWeeklyCycleArgs', 'RolloutPolicyArgs', 'RouterAdvertisedIpRangeArgs', 'RouterBgpPeerBfdArgs', 'RouterBgpPeerArgs', 'RouterBgpArgs', 'RouterInterfaceArgs', 'RouterNatLogConfigArgs', 'RouterNatRuleActionArgs', 'RouterNatRuleArgs', 'RouterNatSubnetworkToNatArgs', 'RouterNatArgs', 'RuleArgs', 'SSLHealthCheckArgs', 'SavedDiskArgs', 'SchedulingNodeAffinityArgs', 'SchedulingArgs', 'SdsConfigArgs', 'SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs', 'SecurityPolicyAdaptiveProtectionConfigArgs', 'SecurityPolicyAdvancedOptionsConfigArgs', 'SecurityPolicyAssociationArgs', 'SecurityPolicyCloudArmorConfigArgs', 'SecurityPolicyDdosProtectionConfigArgs', 'SecurityPolicyRecaptchaOptionsConfigArgs', 'SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs', 'SecurityPolicyRuleHttpHeaderActionArgs', 'SecurityPolicyRuleMatcherConfigDestinationPortArgs', 'SecurityPolicyRuleMatcherConfigLayer4ConfigArgs', 'SecurityPolicyRuleMatcherConfigArgs', 'SecurityPolicyRuleMatcherArgs', 'SecurityPolicyRuleRateLimitOptionsThresholdArgs', 'SecurityPolicyRuleRateLimitOptionsArgs', 'SecurityPolicyRuleRedirectOptionsArgs', 'SecurityPolicyRuleArgs', 'SecuritySettingsArgs', 'ServerBindingArgs', 'ServerTlsSettingsArgs', 'ServiceAccountArgs', 'ServiceAttachmentConsumerProjectLimitArgs', 'ShareSettingsArgs', 'ShieldedInstanceConfigArgs', 'ShieldedInstanceIntegrityPolicyArgs', 'ShieldedVmConfigArgs', 'ShieldedVmIntegrityPolicyArgs', 'SourceDiskEncryptionKeyArgs', 'SourceInstanceParamsArgs', 
'SslCertificateManagedSslCertificateArgs', 'SslCertificateSelfManagedSslCertificateArgs', 'StatefulPolicyPreservedStateArgs', 'StatefulPolicyArgs', 'SubnetworkLogConfigArgs', 'SubnetworkSecondaryRangeArgs', 'SubsettingArgs', 'TCPHealthCheckArgs', 'TagsArgs', 'TlsCertificateContextArgs', 'TlsCertificatePathsArgs', 'TlsContextArgs', 'TlsValidationContextArgs', 'UDPHealthCheckArgs', 'UrlMapTestHeaderArgs', 'UrlMapTestArgs', 'UrlRewriteArgs', 'VpnGatewayVpnGatewayInterfaceArgs', 'WeightedBackendServiceArgs', ] @pulumi.input_type class AcceleratorConfigArgs: def __init__(__self__, *, accelerator_count: Optional[pulumi.Input[int]] = None, accelerator_type: Optional[pulumi.Input[str]] = None): """ A specification of the type and number of accelerator cards attached to the instance. :param pulumi.Input[int] accelerator_count: The number of the guest accelerator cards exposed to this instance. :param pulumi.Input[str] accelerator_type: Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. """ if accelerator_count is not None: pulumi.set(__self__, "accelerator_count", accelerator_count) if accelerator_type is not None: pulumi.set(__self__, "accelerator_type", accelerator_type) @property @pulumi.getter(name="acceleratorCount") def accelerator_count(self) -> Optional[pulumi.Input[int]]: """ The number of the guest accelerator cards exposed to this instance. """ return pulumi.get(self, "accelerator_count") @accelerator_count.setter def accelerator_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "accelerator_count", value) @property @pulumi.getter(name="acceleratorType") def accelerator_type(self) -> Optional[pulumi.Input[str]]: """ Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. """ return pulumi.get(self, "accelerator_type") @accelerator_type.setter def accelerator_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "accelerator_type", value) @pulumi.input_type class AccessConfigArgs: def __init__(__self__, *, external_ipv6: Optional[pulumi.Input[str]] = None, external_ipv6_prefix_length: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, nat_ip: Optional[pulumi.Input[str]] = None, network_tier: Optional[pulumi.Input['AccessConfigNetworkTier']] = None, public_ptr_domain_name: Optional[pulumi.Input[str]] = None, set_public_dns: Optional[pulumi.Input[bool]] = None, set_public_ptr: Optional[pulumi.Input[bool]] = None, type: Optional[pulumi.Input['AccessConfigType']] = None): """ An access configuration attached to an instance's network interface. Only one access config per instance is supported. :param pulumi.Input[str] external_ipv6: The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. :param pulumi.Input[int] external_ipv6_prefix_length: The prefix length of the external IPv6 range. 
:param pulumi.Input[str] name: The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. :param pulumi.Input[str] nat_ip: An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. :param pulumi.Input['AccessConfigNetworkTier'] network_tier: This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. :param pulumi.Input[str] public_ptr_domain_name: The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. :param pulumi.Input[bool] set_public_dns: Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration. :param pulumi.Input[bool] set_public_ptr: Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. :param pulumi.Input['AccessConfigType'] type: The type of configuration. The default and only option is ONE_TO_ONE_NAT. """ if external_ipv6 is not None: pulumi.set(__self__, "external_ipv6", external_ipv6) if external_ipv6_prefix_length is not None: pulumi.set(__self__, "external_ipv6_prefix_length", external_ipv6_prefix_length) if name is not None: pulumi.set(__self__, "name", name) if nat_ip is not None: pulumi.set(__self__, "nat_ip", nat_ip) if network_tier is not None: pulumi.set(__self__, "network_tier", network_tier) if public_ptr_domain_name is not None: pulumi.set(__self__, "public_ptr_domain_name", public_ptr_domain_name) if set_public_dns is not None: pulumi.set(__self__, "set_public_dns", set_public_dns) if set_public_ptr is not None: pulumi.set(__self__, "set_public_ptr", set_public_ptr) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="externalIpv6") def external_ipv6(self) -> Optional[pulumi.Input[str]]: """ The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. """ return pulumi.get(self, "external_ipv6") @external_ipv6.setter def external_ipv6(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "external_ipv6", value) @property @pulumi.getter(name="externalIpv6PrefixLength") def external_ipv6_prefix_length(self) -> Optional[pulumi.Input[int]]: """ The prefix length of the external IPv6 range. 
""" return pulumi.get(self, "external_ipv6_prefix_length") @external_ipv6_prefix_length.setter def external_ipv6_prefix_length(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "external_ipv6_prefix_length", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="natIP") def nat_ip(self) -> Optional[pulumi.Input[str]]: """ An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. """ return pulumi.get(self, "nat_ip") @nat_ip.setter def nat_ip(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip", value) @property @pulumi.getter(name="networkTier") def network_tier(self) -> Optional[pulumi.Input['AccessConfigNetworkTier']]: """ This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. """ return pulumi.get(self, "network_tier") @network_tier.setter def network_tier(self, value: Optional[pulumi.Input['AccessConfigNetworkTier']]): pulumi.set(self, "network_tier", value) @property @pulumi.getter(name="publicPtrDomainName") def public_ptr_domain_name(self) -> Optional[pulumi.Input[str]]: """ The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. """ return pulumi.get(self, "public_ptr_domain_name") @public_ptr_domain_name.setter def public_ptr_domain_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "public_ptr_domain_name", value) @property @pulumi.getter(name="setPublicDns") def set_public_dns(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration. """ return pulumi.get(self, "set_public_dns") @set_public_dns.setter def set_public_dns(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "set_public_dns", value) @property @pulumi.getter(name="setPublicPtr") def set_public_ptr(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. """ return pulumi.get(self, "set_public_ptr") @set_public_ptr.setter def set_public_ptr(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "set_public_ptr", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['AccessConfigType']]: """ The type of configuration. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['AccessConfigType']]): pulumi.set(self, "type", value) @pulumi.input_type class AdvancedMachineFeaturesArgs: def __init__(__self__, *, enable_nested_virtualization: Optional[pulumi.Input[bool]] = None, enable_uefi_networking: Optional[pulumi.Input[bool]] = None, numa_node_count: Optional[pulumi.Input[int]] = None, threads_per_core: Optional[pulumi.Input[int]] = None, visible_core_count: Optional[pulumi.Input[int]] = None): """ Specifies options for controlling advanced machine features. Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled). :param pulumi.Input[bool] enable_nested_virtualization: Whether to enable nested virtualization or not (default is false). :param pulumi.Input[bool] enable_uefi_networking: Whether to enable UEFI networking for instance creation. :param pulumi.Input[int] numa_node_count: The number of vNUMA nodes. :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. :param pulumi.Input[int] visible_core_count: The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width. """ if enable_nested_virtualization is not None: pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization) if enable_uefi_networking is not None: pulumi.set(__self__, "enable_uefi_networking", enable_uefi_networking) if numa_node_count is not None: pulumi.set(__self__, "numa_node_count", numa_node_count) if threads_per_core is not None: pulumi.set(__self__, "threads_per_core", threads_per_core) if visible_core_count is not None: pulumi.set(__self__, "visible_core_count", visible_core_count) @property @pulumi.getter(name="enableNestedVirtualization") def enable_nested_virtualization(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable nested virtualization or not (default is false). """ return pulumi.get(self, "enable_nested_virtualization") @enable_nested_virtualization.setter def enable_nested_virtualization(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_nested_virtualization", value) @property @pulumi.getter(name="enableUefiNetworking") def enable_uefi_networking(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable UEFI networking for instance creation. """ return pulumi.get(self, "enable_uefi_networking") @enable_uefi_networking.setter def enable_uefi_networking(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_uefi_networking", value) @property @pulumi.getter(name="numaNodeCount") def numa_node_count(self) -> Optional[pulumi.Input[int]]: """ The number of vNUMA nodes.
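Example (an illustrative sketch; the values are hypothetical):

    features = AdvancedMachineFeaturesArgs(
        threads_per_core=1,    # disable SMT
        visible_core_count=2,
        numa_node_count=2,
    )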
""" return pulumi.get(self, "numa_node_count") @numa_node_count.setter def numa_node_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "numa_node_count", value) @property @pulumi.getter(name="threadsPerCore") def threads_per_core(self) -> Optional[pulumi.Input[int]]: """ The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. """ return pulumi.get(self, "threads_per_core") @threads_per_core.setter def threads_per_core(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "threads_per_core", value) @property @pulumi.getter(name="visibleCoreCount") def visible_core_count(self) -> Optional[pulumi.Input[int]]: """ The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width. """ return pulumi.get(self, "visible_core_count") @visible_core_count.setter def visible_core_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "visible_core_count", value) @pulumi.input_type class AliasIpRangeArgs: def __init__(__self__, *, ip_cidr_range: Optional[pulumi.Input[str]] = None, subnetwork_range_name: Optional[pulumi.Input[str]] = None): """ An alias IP range attached to an instance's network interface. :param pulumi.Input[str] ip_cidr_range: The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). :param pulumi.Input[str] subnetwork_range_name: The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. """ if ip_cidr_range is not None: pulumi.set(__self__, "ip_cidr_range", ip_cidr_range) if subnetwork_range_name is not None: pulumi.set(__self__, "subnetwork_range_name", subnetwork_range_name) @property @pulumi.getter(name="ipCidrRange") def ip_cidr_range(self) -> Optional[pulumi.Input[str]]: """ The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). """ return pulumi.get(self, "ip_cidr_range") @ip_cidr_range.setter def ip_cidr_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_cidr_range", value) @property @pulumi.getter(name="subnetworkRangeName") def subnetwork_range_name(self) -> Optional[pulumi.Input[str]]: """ The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. 
""" return pulumi.get(self, "subnetwork_range_name") @subnetwork_range_name.setter def subnetwork_range_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork_range_name", value) @pulumi.input_type class AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs: def __init__(__self__, *, disk_size_gb: Optional[pulumi.Input[str]] = None, interface: Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']] = None): """ :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB. :param pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. """ if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if interface is not None: pulumi.set(__self__, "interface", interface) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[str]]: """ Specifies the size of the disk in base-2 GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter def interface(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]: """ Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. """ return pulumi.get(self, "interface") @interface.setter def interface(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]): pulumi.set(self, "interface", value) @pulumi.input_type class AllocationSpecificSKUAllocationReservedInstancePropertiesArgs: def __init__(__self__, *, guest_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]] = None, local_ssds: Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]] = None, location_hint: Optional[pulumi.Input[str]] = None, machine_type: Optional[pulumi.Input[str]] = None, maintenance_freeze_duration_hours: Optional[pulumi.Input[int]] = None, maintenance_interval: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']] = None, min_cpu_platform: Optional[pulumi.Input[str]] = None): """ Properties of the SKU instances being reserved. Next ID: 9 :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: Specifies accelerator type and count. :param pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]] local_ssds: Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. :param pulumi.Input[str] location_hint: An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. :param pulumi.Input[str] machine_type: Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. 
:param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals. :param pulumi.Input[str] min_cpu_platform: Minimum CPU platform for the reservation. """ if guest_accelerators is not None: pulumi.set(__self__, "guest_accelerators", guest_accelerators) if local_ssds is not None: pulumi.set(__self__, "local_ssds", local_ssds) if location_hint is not None: pulumi.set(__self__, "location_hint", location_hint) if machine_type is not None: pulumi.set(__self__, "machine_type", machine_type) if maintenance_freeze_duration_hours is not None: pulumi.set(__self__, "maintenance_freeze_duration_hours", maintenance_freeze_duration_hours) if maintenance_interval is not None: pulumi.set(__self__, "maintenance_interval", maintenance_interval) if min_cpu_platform is not None: pulumi.set(__self__, "min_cpu_platform", min_cpu_platform) @property @pulumi.getter(name="guestAccelerators") def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]: """ Specifies accelerator type and count. """ return pulumi.get(self, "guest_accelerators") @guest_accelerators.setter def guest_accelerators(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]): pulumi.set(self, "guest_accelerators", value) @property @pulumi.getter(name="localSsds") def local_ssds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]: """ Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. """ return pulumi.get(self, "local_ssds") @local_ssds.setter def local_ssds(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]): pulumi.set(self, "local_ssds", value) @property @pulumi.getter(name="locationHint") def location_hint(self) -> Optional[pulumi.Input[str]]: """ An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. """ return pulumi.get(self, "location_hint") @location_hint.setter def location_hint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location_hint", value) @property @pulumi.getter(name="machineType") def machine_type(self) -> Optional[pulumi.Input[str]]: """ Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. """ return pulumi.get(self, "machine_type") @machine_type.setter def machine_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "machine_type", value) @property @pulumi.getter(name="maintenanceFreezeDurationHours") def maintenance_freeze_duration_hours(self) -> Optional[pulumi.Input[int]]: """ Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. """ return pulumi.get(self, "maintenance_freeze_duration_hours") @maintenance_freeze_duration_hours.setter def maintenance_freeze_duration_hours(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "maintenance_freeze_duration_hours", value) @property @pulumi.getter(name="maintenanceInterval") def maintenance_interval(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']]: """ For more information about maintenance intervals, see Setting maintenance intervals. """ return pulumi.get(self, "maintenance_interval") @maintenance_interval.setter def maintenance_interval(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']]): pulumi.set(self, "maintenance_interval", value) @property @pulumi.getter(name="minCpuPlatform") def min_cpu_platform(self) -> Optional[pulumi.Input[str]]: """ Minimum CPU platform for the reservation. """ return pulumi.get(self, "min_cpu_platform") @min_cpu_platform.setter def min_cpu_platform(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "min_cpu_platform", value) @pulumi.input_type class AllocationSpecificSKUReservationArgs: def __init__(__self__, *, count: Optional[pulumi.Input[str]] = None, instance_properties: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']] = None): """ This reservation type allows you to pre-allocate a specific instance configuration. Next ID: 5 :param pulumi.Input[str] count: Specifies the number of resources that are allocated. :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs'] instance_properties: The instance properties for the reservation. """ if count is not None: pulumi.set(__self__, "count", count) if instance_properties is not None: pulumi.set(__self__, "instance_properties", instance_properties) @property @pulumi.getter def count(self) -> Optional[pulumi.Input[str]]: """ Specifies the number of resources that are allocated. """ return pulumi.get(self, "count") @count.setter def count(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "count", value) @property @pulumi.getter(name="instanceProperties") def instance_properties(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]: """ The instance properties for the reservation.
""" return pulumi.get(self, "instance_properties") @instance_properties.setter def instance_properties(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]): pulumi.set(self, "instance_properties", value) @pulumi.input_type class AttachedDiskInitializeParamsArgs: def __init__(__self__, *, architecture: Optional[pulumi.Input['AttachedDiskInitializeParamsArchitecture']] = None, description: Optional[pulumi.Input[str]] = None, disk_name: Optional[pulumi.Input[str]] = None, disk_size_gb: Optional[pulumi.Input[str]] = None, disk_type: Optional[pulumi.Input[str]] = None, guest_os_features: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, license_codes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, licenses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, multi_writer: Optional[pulumi.Input[bool]] = None, on_update_action: Optional[pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction']] = None, provisioned_iops: Optional[pulumi.Input[str]] = None, replica_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, resource_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_image: Optional[pulumi.Input[str]] = None, source_image_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None, source_instant_snapshot: Optional[pulumi.Input[str]] = None, source_snapshot: Optional[pulumi.Input[str]] = None, source_snapshot_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None): """ [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. :param pulumi.Input['AttachedDiskInitializeParamsArchitecture'] architecture: The architecture of the attached disk. Valid values are arm64 or x86_64. :param pulumi.Input[str] description: An optional description. Provide this property when creating the disk. :param pulumi.Input[str] disk_name: Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. :param pulumi.Input[str] disk_type: Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL. 
:param pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]] guest_os_features: A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. :param pulumi.Input[Sequence[pulumi.Input[str]]] license_codes: Integer license codes indicating which licenses are attached to this disk. :param pulumi.Input[Sequence[pulumi.Input[str]]] licenses: A list of publicly visible licenses. Reserved for Google's use. :param pulumi.Input[bool] multi_writer: Indicates whether or not the disk can be read/write attached to more than one instance. :param pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction'] on_update_action: Specifies which action to take on instance update with this disk. Default is to use the existing disk. :param pulumi.Input[str] provisioned_iops: Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. :param pulumi.Input[Sequence[pulumi.Input[str]]] replica_zones: URLs of the zones where the disk should be replicated to. Only applicable for regional resources. :param pulumi.Input[Sequence[pulumi.Input[str]]] resource_policies: Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name. :param pulumi.Input[str] source_image: The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. :param pulumi.Input['CustomerEncryptionKeyArgs'] source_image_encryption_key: The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. :param pulumi.Input[str] source_instant_snapshot: The source instant-snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot, initializeParams.sourceInstantSnapshot, initializeParams.sourceImage, or disks.source is required except for local SSD.
To create a disk with a snapshot that you created, specify the snapshot name in the following format: us-central1-a/instantSnapshots/my-backup If the source instant-snapshot is deleted later, this field will not be set. :param pulumi.Input[str] source_snapshot: The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. :param pulumi.Input['CustomerEncryptionKeyArgs'] source_snapshot_encryption_key: The customer-supplied encryption key of the source snapshot. """ if architecture is not None: pulumi.set(__self__, "architecture", architecture) if description is not None: pulumi.set(__self__, "description", description) if disk_name is not None: pulumi.set(__self__, "disk_name", disk_name) if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if disk_type is not None: pulumi.set(__self__, "disk_type", disk_type) if guest_os_features is not None: pulumi.set(__self__, "guest_os_features", guest_os_features) if labels is not None: pulumi.set(__self__, "labels", labels) if license_codes is not None: pulumi.set(__self__, "license_codes", license_codes) if licenses is not None: pulumi.set(__self__, "licenses", licenses) if multi_writer is not None: pulumi.set(__self__, "multi_writer", multi_writer) if on_update_action is not None: pulumi.set(__self__, "on_update_action", on_update_action) if provisioned_iops is not None: pulumi.set(__self__, "provisioned_iops", provisioned_iops) if replica_zones is not None: pulumi.set(__self__, "replica_zones", replica_zones) if resource_policies is not None: pulumi.set(__self__, "resource_policies", resource_policies) if source_image is not None: pulumi.set(__self__, "source_image", source_image) if source_image_encryption_key is not None: pulumi.set(__self__, "source_image_encryption_key", source_image_encryption_key) if source_instant_snapshot is not None: pulumi.set(__self__, "source_instant_snapshot", source_instant_snapshot) if source_snapshot is not None: pulumi.set(__self__, "source_snapshot", source_snapshot) if source_snapshot_encryption_key is not None: pulumi.set(__self__, "source_snapshot_encryption_key", source_snapshot_encryption_key) @property @pulumi.getter def architecture(self) -> Optional[pulumi.Input['AttachedDiskInitializeParamsArchitecture']]: """ The architecture of the attached disk. Valid values are arm64 or x86_64. """ return pulumi.get(self, "architecture") @architecture.setter def architecture(self, value: Optional[pulumi.Input['AttachedDiskInitializeParamsArchitecture']]): pulumi.set(self, "architecture", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description. Provide this property when creating the disk. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="diskName") def disk_name(self) -> Optional[pulumi.Input[str]]: """ Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. 
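Example (an illustrative sketch; the disk name and size are placeholders, and int64 values such as the size are passed as strings in this API):

    params = AttachedDiskInitializeParamsArgs(
        disk_name="my-data-disk",
        disk_size_gb="100",
        disk_type="pd-standard",
    )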
""" return pulumi.get(self, "disk_name") @disk_name.setter def disk_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_name", value) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[str]]: """ Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter(name="diskType") def disk_type(self) -> Optional[pulumi.Input[str]]: """ Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL. """ return pulumi.get(self, "disk_type") @disk_type.setter def disk_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_type", value) @property @pulumi.getter(name="guestOsFeatures") def guest_os_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]: """ A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures """ return pulumi.get(self, "guest_os_features") @guest_os_features.setter def guest_os_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]): pulumi.set(self, "guest_os_features", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter(name="licenseCodes") def license_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Integer license codes indicating which licenses are attached to this disk. """ return pulumi.get(self, "license_codes") @license_codes.setter def license_codes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "license_codes", value) @property @pulumi.getter def licenses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of publicly visible licenses. Reserved for Google's use. 
""" return pulumi.get(self, "licenses") @licenses.setter def licenses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "licenses", value) @property @pulumi.getter(name="multiWriter") def multi_writer(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether or not the disk can be read/write attached to more than one instance. """ return pulumi.get(self, "multi_writer") @multi_writer.setter def multi_writer(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "multi_writer", value) @property @pulumi.getter(name="onUpdateAction") def on_update_action(self) -> Optional[pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction']]: """ Specifies which action to take on instance update with this disk. Default is to use the existing disk. """ return pulumi.get(self, "on_update_action") @on_update_action.setter def on_update_action(self, value: Optional[pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction']]): pulumi.set(self, "on_update_action", value) @property @pulumi.getter(name="provisionedIops") def provisioned_iops(self) -> Optional[pulumi.Input[str]]: """ Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. """ return pulumi.get(self, "provisioned_iops") @provisioned_iops.setter def provisioned_iops(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "provisioned_iops", value) @property @pulumi.getter(name="replicaZones") def replica_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ URLs of the zones where the disk should be replicated to. Only applicable for regional resources. """ return pulumi.get(self, "replica_zones") @replica_zones.setter def replica_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "replica_zones", value) @property @pulumi.getter(name="resourcePolicies") def resource_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name. """ return pulumi.get(self, "resource_policies") @resource_policies.setter def resource_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "resource_policies", value) @property @pulumi.getter(name="sourceImage") def source_image(self) -> Optional[pulumi.Input[str]]: """ The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. 
""" return pulumi.get(self, "source_image") @source_image.setter def source_image(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_image", value) @property @pulumi.getter(name="sourceImageEncryptionKey") def source_image_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. """ return pulumi.get(self, "source_image_encryption_key") @source_image_encryption_key.setter def source_image_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "source_image_encryption_key", value) @property @pulumi.getter(name="sourceInstantSnapshot") def source_instant_snapshot(self) -> Optional[pulumi.Input[str]]: """ The source instant-snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceInstantSnapshot initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: us-central1-a/instantSnapshots/my-backup If the source instant-snapshot is deleted later, this field will not be set. """ return pulumi.get(self, "source_instant_snapshot") @source_instant_snapshot.setter def source_instant_snapshot(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_instant_snapshot", value) @property @pulumi.getter(name="sourceSnapshot") def source_snapshot(self) -> Optional[pulumi.Input[str]]: """ The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. """ return pulumi.get(self, "source_snapshot") @source_snapshot.setter def source_snapshot(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_snapshot", value) @property @pulumi.getter(name="sourceSnapshotEncryptionKey") def source_snapshot_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ The customer-supplied encryption key of the source snapshot. 
""" return pulumi.get(self, "source_snapshot_encryption_key") @source_snapshot_encryption_key.setter def source_snapshot_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "source_snapshot_encryption_key", value) @pulumi.input_type class AttachedDiskArgs: def __init__(__self__, *, auto_delete: Optional[pulumi.Input[bool]] = None, boot: Optional[pulumi.Input[bool]] = None, device_name: Optional[pulumi.Input[str]] = None, disk_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None, disk_size_gb: Optional[pulumi.Input[str]] = None, force_attach: Optional[pulumi.Input[bool]] = None, guest_os_features: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]] = None, initialize_params: Optional[pulumi.Input['AttachedDiskInitializeParamsArgs']] = None, interface: Optional[pulumi.Input['AttachedDiskInterface']] = None, mode: Optional[pulumi.Input['AttachedDiskMode']] = None, saved_state: Optional[pulumi.Input['AttachedDiskSavedState']] = None, source: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input['AttachedDiskType']] = None): """ An instance-attached disk resource. :param pulumi.Input[bool] auto_delete: Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). :param pulumi.Input[bool] boot: Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. :param pulumi.Input[str] device_name: Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. :param pulumi.Input['CustomerEncryptionKeyArgs'] disk_encryption_key: Encrypts or decrypts a disk using a customer-supplied encryption key. If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key. If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance. If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. :param pulumi.Input[str] disk_size_gb: The size of the disk in GB. :param pulumi.Input[bool] force_attach: [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. :param pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]] guest_os_features: A list of features to enable on the guest operating system. Applicable only for bootable images. 
Read Enabling guest operating system features to see a list of available options. :param pulumi.Input['AttachedDiskInitializeParamsArgs'] initialize_params: [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. :param pulumi.Input['AttachedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. :param pulumi.Input['AttachedDiskMode'] mode: The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. :param pulumi.Input['AttachedDiskSavedState'] saved_state: For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request (see the discard_local_ssd option on Stop/Suspend). Read-only in the API. :param pulumi.Input[str] source: Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. :param pulumi.Input['AttachedDiskType'] type: Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. """ if auto_delete is not None: pulumi.set(__self__, "auto_delete", auto_delete) if boot is not None: pulumi.set(__self__, "boot", boot) if device_name is not None: pulumi.set(__self__, "device_name", device_name) if disk_encryption_key is not None: pulumi.set(__self__, "disk_encryption_key", disk_encryption_key) if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if force_attach is not None: pulumi.set(__self__, "force_attach", force_attach) if guest_os_features is not None: pulumi.set(__self__, "guest_os_features", guest_os_features) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) if interface is not None: pulumi.set(__self__, "interface", interface) if mode is not None: pulumi.set(__self__, "mode", mode) if saved_state is not None: pulumi.set(__self__, "saved_state", saved_state) if source is not None: pulumi.set(__self__, "source", source) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="autoDelete") def auto_delete(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).
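Example (an illustrative sketch of a boot disk that is cleaned up with its instance; the image family is a placeholder, and the enum-typed type field accepts its string value because Pulumi's generated enums subclass str):

    boot_disk = AttachedDiskArgs(
        auto_delete=True,
        boot=True,
        type="PERSISTENT",
        initialize_params=AttachedDiskInitializeParamsArgs(
            source_image="projects/debian-cloud/global/images/family/debian-9",
        ),
    )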
""" return pulumi.get(self, "auto_delete") @auto_delete.setter def auto_delete(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_delete", value) @property @pulumi.getter def boot(self) -> Optional[pulumi.Input[bool]]: """ Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. """ return pulumi.get(self, "boot") @boot.setter def boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "boot", value) @property @pulumi.getter(name="deviceName") def device_name(self) -> Optional[pulumi.Input[str]]: """ Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. """ return pulumi.get(self, "device_name") @device_name.setter def device_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "device_name", value) @property @pulumi.getter(name="diskEncryptionKey") def disk_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ Encrypts or decrypts a disk using a customer-supplied encryption key. If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key. If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance. If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. """ return pulumi.get(self, "disk_encryption_key") @disk_encryption_key.setter def disk_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "disk_encryption_key", value) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[str]]: """ The size of the disk in GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter(name="forceAttach") def force_attach(self) -> Optional[pulumi.Input[bool]]: """ [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. """ return pulumi.get(self, "force_attach") @force_attach.setter def force_attach(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "force_attach", value) @property @pulumi.getter(name="guestOsFeatures") def guest_os_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]: """ A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. """ return pulumi.get(self, "guest_os_features") @guest_os_features.setter def guest_os_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]): pulumi.set(self, "guest_os_features", value) @property @pulumi.getter(name="initializeParams") def initialize_params(self) -> Optional[pulumi.Input['AttachedDiskInitializeParamsArgs']]: """ [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. """ return pulumi.get(self, "initialize_params") @initialize_params.setter def initialize_params(self, value: Optional[pulumi.Input['AttachedDiskInitializeParamsArgs']]): pulumi.set(self, "initialize_params", value) @property @pulumi.getter def interface(self) -> Optional[pulumi.Input['AttachedDiskInterface']]: """ Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. """ return pulumi.get(self, "interface") @interface.setter def interface(self, value: Optional[pulumi.Input['AttachedDiskInterface']]): pulumi.set(self, "interface", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['AttachedDiskMode']]: """ The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['AttachedDiskMode']]): pulumi.set(self, "mode", value) @property @pulumi.getter(name="savedState") def saved_state(self) -> Optional[pulumi.Input['AttachedDiskSavedState']]: """ For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request (see the discard_local_ssd option on Stop/Suspend). Read-only in the API. """ return pulumi.get(self, "saved_state") @saved_state.setter def saved_state(self, value: Optional[pulumi.Input['AttachedDiskSavedState']]): pulumi.set(self, "saved_state", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input[str]]: """ Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['AttachedDiskType']]: """ Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.
""" return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['AttachedDiskType']]): pulumi.set(self, "type", value) @pulumi.input_type class AuditConfigArgs: def __init__(__self__, *, audit_log_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]] = None, exempted_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, service: Optional[pulumi.Input[str]] = None): """ Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging. :param pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]] audit_log_configs: The configuration for logging of each type of permission. :param pulumi.Input[Sequence[pulumi.Input[str]]] exempted_members: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] service: Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. """ if audit_log_configs is not None: pulumi.set(__self__, "audit_log_configs", audit_log_configs) if exempted_members is not None: pulumi.set(__self__, "exempted_members", exempted_members) if service is not None: pulumi.set(__self__, "service", service) @property @pulumi.getter(name="auditLogConfigs") def audit_log_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]]: """ The configuration for logging of each type of permission. """ return pulumi.get(self, "audit_log_configs") @audit_log_configs.setter def audit_log_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]]): pulumi.set(self, "audit_log_configs", value) @property @pulumi.getter(name="exemptedMembers") def exempted_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "exempted_members") @exempted_members.setter def exempted_members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "exempted_members", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. 
""" return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @pulumi.input_type class AuditLogConfigArgs: def __init__(__self__, *, exempted_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ignore_child_exemptions: Optional[pulumi.Input[bool]] = None, log_type: Optional[pulumi.Input['AuditLogConfigLogType']] = None): """ Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging. :param pulumi.Input[Sequence[pulumi.Input[str]]] exempted_members: Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members. :param pulumi.Input[bool] ignore_child_exemptions: This is deprecated and has no effect. Do not use. :param pulumi.Input['AuditLogConfigLogType'] log_type: The log type that this config enables. """ if exempted_members is not None: pulumi.set(__self__, "exempted_members", exempted_members) if ignore_child_exemptions is not None: pulumi.set(__self__, "ignore_child_exemptions", ignore_child_exemptions) if log_type is not None: pulumi.set(__self__, "log_type", log_type) @property @pulumi.getter(name="exemptedMembers") def exempted_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members. """ return pulumi.get(self, "exempted_members") @exempted_members.setter def exempted_members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "exempted_members", value) @property @pulumi.getter(name="ignoreChildExemptions") def ignore_child_exemptions(self) -> Optional[pulumi.Input[bool]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "ignore_child_exemptions") @ignore_child_exemptions.setter def ignore_child_exemptions(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ignore_child_exemptions", value) @property @pulumi.getter(name="logType") def log_type(self) -> Optional[pulumi.Input['AuditLogConfigLogType']]: """ The log type that this config enables. """ return pulumi.get(self, "log_type") @log_type.setter def log_type(self, value: Optional[pulumi.Input['AuditLogConfigLogType']]): pulumi.set(self, "log_type", value) @pulumi.input_type class AuthorizationLoggingOptionsArgs: def __init__(__self__, *, permission_type: Optional[pulumi.Input['AuthorizationLoggingOptionsPermissionType']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['AuthorizationLoggingOptionsPermissionType'] permission_type: This is deprecated and has no effect. Do not use. """ if permission_type is not None: pulumi.set(__self__, "permission_type", permission_type) @property @pulumi.getter(name="permissionType") def permission_type(self) -> Optional[pulumi.Input['AuthorizationLoggingOptionsPermissionType']]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "permission_type") @permission_type.setter def permission_type(self, value: Optional[pulumi.Input['AuthorizationLoggingOptionsPermissionType']]): pulumi.set(self, "permission_type", value) @pulumi.input_type class AutoscalingPolicyCpuUtilizationArgs: def __init__(__self__, *, predictive_method: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod']] = None, utilization_target: Optional[pulumi.Input[float]] = None): """ CPU utilization policy. :param pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod'] predictive_method: Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. :param pulumi.Input[float] utilization_target: The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization. """ if predictive_method is not None: pulumi.set(__self__, "predictive_method", predictive_method) if utilization_target is not None: pulumi.set(__self__, "utilization_target", utilization_target) @property @pulumi.getter(name="predictiveMethod") def predictive_method(self) -> Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod']]: """ Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. """ return pulumi.get(self, "predictive_method") @predictive_method.setter def predictive_method(self, value: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod']]): pulumi.set(self, "predictive_method", value) @property @pulumi.getter(name="utilizationTarget") def utilization_target(self) -> Optional[pulumi.Input[float]]: """ The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization. 
""" return pulumi.get(self, "utilization_target") @utilization_target.setter def utilization_target(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "utilization_target", value) @pulumi.input_type class AutoscalingPolicyCustomMetricUtilizationArgs: def __init__(__self__, *, filter: Optional[pulumi.Input[str]] = None, metric: Optional[pulumi.Input[str]] = None, single_instance_assignment: Optional[pulumi.Input[float]] = None, utilization_target: Optional[pulumi.Input[float]] = None, utilization_target_type: Optional[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType']] = None): """ Custom utilization metric policy. :param pulumi.Input[str] filter: A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. For the filter to be valid for autoscaling purposes, the following rules apply: - You can only use the AND operator for joining selectors. - You can only use direct equality comparison operator (=) without any functions for each selector. - You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. - The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a *per-group metric* for the purpose of autoscaling. If not specified, the type defaults to gce_instance. Try to provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. :param pulumi.Input[str] metric: The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE. :param pulumi.Input[float] single_instance_assignment: If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler keeps the number of instances proportional to the value of this metric. The metric itself does not change value due to group resizing. A good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead. :param pulumi.Input[float] utilization_target: The target value of the metric that autoscaler maintains. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. 
For example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler works to keep this value constant for each of the instances. :param pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType'] utilization_target_type: Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. """ if filter is not None: pulumi.set(__self__, "filter", filter) if metric is not None: pulumi.set(__self__, "metric", metric) if single_instance_assignment is not None: pulumi.set(__self__, "single_instance_assignment", single_instance_assignment) if utilization_target is not None: pulumi.set(__self__, "utilization_target", utilization_target) if utilization_target_type is not None: pulumi.set(__self__, "utilization_target_type", utilization_target_type) @property @pulumi.getter def filter(self) -> Optional[pulumi.Input[str]]: """ A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. For the filter to be valid for autoscaling purposes, the following rules apply: - You can only use the AND operator for joining selectors. - You can only use direct equality comparison operator (=) without any functions for each selector. - You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. - The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a *per-group metric* for the purpose of autoscaling. If not specified, the type defaults to gce_instance. Try to provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. """ return pulumi.get(self, "filter") @filter.setter def filter(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "filter", value) @property @pulumi.getter def metric(self) -> Optional[pulumi.Input[str]]: """ The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE. """ return pulumi.get(self, "metric") @metric.setter def metric(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "metric", value) @property @pulumi.getter(name="singleInstanceAssignment") def single_instance_assignment(self) -> Optional[pulumi.Input[float]]: """ If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler keeps the number of instances proportional to the value of this metric. 
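For instance (illustrative, values assumed): with single_instance_assignment=100 and a per-group metric value of 550, a proportional autoscaler would target ceil(550 / 100) = 6 instances.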
The metric itself does not change value due to group resizing. A good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; such a metric is better used with utilization_target instead.
        """
        return pulumi.get(self, "single_instance_assignment")

    @single_instance_assignment.setter
    def single_instance_assignment(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "single_instance_assignment", value)

    @property
    @pulumi.getter(name="utilizationTarget")
    def utilization_target(self) -> Optional[pulumi.Input[float]]:
        """
        The target value of the metric that the autoscaler maintains. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler works to keep this value constant for each of the instances.
        """
        return pulumi.get(self, "utilization_target")

    @utilization_target.setter
    def utilization_target(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "utilization_target", value)

    @property
    @pulumi.getter(name="utilizationTargetType")
    def utilization_target_type(self) -> Optional[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType']]:
        """
        Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE.
        """
        return pulumi.get(self, "utilization_target_type")

    @utilization_target_type.setter
    def utilization_target_type(self, value: Optional[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType']]):
        pulumi.set(self, "utilization_target_type", value)


@pulumi.input_type
class AutoscalingPolicyLoadBalancingUtilizationArgs:
    def __init__(__self__, *,
                 utilization_target: Optional[pulumi.Input[float]] = None):
        """
        Configuration parameters of autoscaling based on load balancing.
        :param pulumi.Input[float] utilization_target: Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that the autoscaler maintains. Must be a positive float value. If not defined, the default is 0.8.
        """
        if utilization_target is not None:
            pulumi.set(__self__, "utilization_target", utilization_target)

    @property
    @pulumi.getter(name="utilizationTarget")
    def utilization_target(self) -> Optional[pulumi.Input[float]]:
        """
        Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that the autoscaler maintains. Must be a positive float value. If not defined, the default is 0.8.
        """
        return pulumi.get(self, "utilization_target")

    @utilization_target.setter
    def utilization_target(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "utilization_target", value)


@pulumi.input_type
class AutoscalingPolicyScaleDownControlArgs:
    def __init__(__self__, *,
                 max_scaled_down_replicas: Optional[pulumi.Input['FixedOrPercentArgs']] = None,
                 time_window_sec: Optional[pulumi.Input[int]] = None):
        """
        Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below.
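        Illustrative example (values assumed, not from the API docs): { "max_scaled_down_replicas": { "fixed": 2 }, "time_window_sec": 300 } would allow the group to shrink by at most two VMs below the peak recommendation observed over the trailing five minutes.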
:param pulumi.Input['FixedOrPercentArgs'] max_scaled_down_replicas: Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step.
        :param pulumi.Input[int] time_window_sec: How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above.
        """
        if max_scaled_down_replicas is not None:
            pulumi.set(__self__, "max_scaled_down_replicas", max_scaled_down_replicas)
        if time_window_sec is not None:
            pulumi.set(__self__, "time_window_sec", time_window_sec)

    @property
    @pulumi.getter(name="maxScaledDownReplicas")
    def max_scaled_down_replicas(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]:
        """
        Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step.
        """
        return pulumi.get(self, "max_scaled_down_replicas")

    @max_scaled_down_replicas.setter
    def max_scaled_down_replicas(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]):
        pulumi.set(self, "max_scaled_down_replicas", value)

    @property
    @pulumi.getter(name="timeWindowSec")
    def time_window_sec(self) -> Optional[pulumi.Input[int]]:
        """
        How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above.
        """
        return pulumi.get(self, "time_window_sec")

    @time_window_sec.setter
    def time_window_sec(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "time_window_sec", value)


@pulumi.input_type
class AutoscalingPolicyScaleInControlArgs:
    def __init__(__self__, *,
                 max_scaled_in_replicas: Optional[pulumi.Input['FixedOrPercentArgs']] = None,
                 time_window_sec: Optional[pulumi.Input[int]] = None):
        """
        Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below.
        :param pulumi.Input['FixedOrPercentArgs'] max_scaled_in_replicas: Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step.
        :param pulumi.Input[int] time_window_sec: How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above.
        """
        if max_scaled_in_replicas is not None:
            pulumi.set(__self__, "max_scaled_in_replicas", max_scaled_in_replicas)
        if time_window_sec is not None:
            pulumi.set(__self__, "time_window_sec", time_window_sec)

    @property
    @pulumi.getter(name="maxScaledInReplicas")
    def max_scaled_in_replicas(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]:
        """
        Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step.
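        Illustrative value (assumed): a FixedOrPercentArgs with percent=10 caps scale-in at 10% of the peak recommendation within the configured window.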
""" return pulumi.get(self, "max_scaled_in_replicas") @max_scaled_in_replicas.setter def max_scaled_in_replicas(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_scaled_in_replicas", value) @property @pulumi.getter(name="timeWindowSec") def time_window_sec(self) -> Optional[pulumi.Input[int]]: """ How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. """ return pulumi.get(self, "time_window_sec") @time_window_sec.setter def time_window_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "time_window_sec", value) @pulumi.input_type class AutoscalingPolicyArgs: def __init__(__self__, *, cool_down_period_sec: Optional[pulumi.Input[int]] = None, cpu_utilization: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationArgs']] = None, custom_metric_utilizations: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]]] = None, load_balancing_utilization: Optional[pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs']] = None, max_num_replicas: Optional[pulumi.Input[int]] = None, min_num_replicas: Optional[pulumi.Input[int]] = None, mode: Optional[pulumi.Input['AutoscalingPolicyMode']] = None, scale_down_control: Optional[pulumi.Input['AutoscalingPolicyScaleDownControlArgs']] = None, scale_in_control: Optional[pulumi.Input['AutoscalingPolicyScaleInControlArgs']] = None, scaling_schedules: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Cloud Autoscaler policy. :param pulumi.Input[int] cool_down_period_sec: The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process. :param pulumi.Input['AutoscalingPolicyCpuUtilizationArgs'] cpu_utilization: Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. :param pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]] custom_metric_utilizations: Configuration parameters of autoscaling based on a custom metric. :param pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs'] load_balancing_utilization: Configuration parameters of autoscaling based on load balancer. :param pulumi.Input[int] max_num_replicas: The maximum number of instances that the autoscaler can scale out to. This is required when creating or updating an autoscaler. The maximum number of replicas must not be lower than minimal number of replicas. :param pulumi.Input[int] min_num_replicas: The minimum number of replicas that the autoscaler can scale in to. This cannot be less than 0. If not provided, autoscaler chooses a default value depending on maximum number of instances allowed. :param pulumi.Input['AutoscalingPolicyMode'] mode: Defines operating mode for this policy. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] scaling_schedules: Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. 
Up to 128 scaling schedules are allowed. """ if cool_down_period_sec is not None: pulumi.set(__self__, "cool_down_period_sec", cool_down_period_sec) if cpu_utilization is not None: pulumi.set(__self__, "cpu_utilization", cpu_utilization) if custom_metric_utilizations is not None: pulumi.set(__self__, "custom_metric_utilizations", custom_metric_utilizations) if load_balancing_utilization is not None: pulumi.set(__self__, "load_balancing_utilization", load_balancing_utilization) if max_num_replicas is not None: pulumi.set(__self__, "max_num_replicas", max_num_replicas) if min_num_replicas is not None: pulumi.set(__self__, "min_num_replicas", min_num_replicas) if mode is not None: pulumi.set(__self__, "mode", mode) if scale_down_control is not None: pulumi.set(__self__, "scale_down_control", scale_down_control) if scale_in_control is not None: pulumi.set(__self__, "scale_in_control", scale_in_control) if scaling_schedules is not None: pulumi.set(__self__, "scaling_schedules", scaling_schedules) @property @pulumi.getter(name="coolDownPeriodSec") def cool_down_period_sec(self) -> Optional[pulumi.Input[int]]: """ The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process. """ return pulumi.get(self, "cool_down_period_sec") @cool_down_period_sec.setter def cool_down_period_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "cool_down_period_sec", value) @property @pulumi.getter(name="cpuUtilization") def cpu_utilization(self) -> Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationArgs']]: """ Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. """ return pulumi.get(self, "cpu_utilization") @cpu_utilization.setter def cpu_utilization(self, value: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationArgs']]): pulumi.set(self, "cpu_utilization", value) @property @pulumi.getter(name="customMetricUtilizations") def custom_metric_utilizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]]]: """ Configuration parameters of autoscaling based on a custom metric. """ return pulumi.get(self, "custom_metric_utilizations") @custom_metric_utilizations.setter def custom_metric_utilizations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]]]): pulumi.set(self, "custom_metric_utilizations", value) @property @pulumi.getter(name="loadBalancingUtilization") def load_balancing_utilization(self) -> Optional[pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs']]: """ Configuration parameters of autoscaling based on load balancer. 
""" return pulumi.get(self, "load_balancing_utilization") @load_balancing_utilization.setter def load_balancing_utilization(self, value: Optional[pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs']]): pulumi.set(self, "load_balancing_utilization", value) @property @pulumi.getter(name="maxNumReplicas") def max_num_replicas(self) -> Optional[pulumi.Input[int]]: """ The maximum number of instances that the autoscaler can scale out to. This is required when creating or updating an autoscaler. The maximum number of replicas must not be lower than minimal number of replicas. """ return pulumi.get(self, "max_num_replicas") @max_num_replicas.setter def max_num_replicas(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_num_replicas", value) @property @pulumi.getter(name="minNumReplicas") def min_num_replicas(self) -> Optional[pulumi.Input[int]]: """ The minimum number of replicas that the autoscaler can scale in to. This cannot be less than 0. If not provided, autoscaler chooses a default value depending on maximum number of instances allowed. """ return pulumi.get(self, "min_num_replicas") @min_num_replicas.setter def min_num_replicas(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_num_replicas", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['AutoscalingPolicyMode']]: """ Defines operating mode for this policy. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['AutoscalingPolicyMode']]): pulumi.set(self, "mode", value) @property @pulumi.getter(name="scaleDownControl") def scale_down_control(self) -> Optional[pulumi.Input['AutoscalingPolicyScaleDownControlArgs']]: return pulumi.get(self, "scale_down_control") @scale_down_control.setter def scale_down_control(self, value: Optional[pulumi.Input['AutoscalingPolicyScaleDownControlArgs']]): pulumi.set(self, "scale_down_control", value) @property @pulumi.getter(name="scaleInControl") def scale_in_control(self) -> Optional[pulumi.Input['AutoscalingPolicyScaleInControlArgs']]: return pulumi.get(self, "scale_in_control") @scale_in_control.setter def scale_in_control(self, value: Optional[pulumi.Input['AutoscalingPolicyScaleInControlArgs']]): pulumi.set(self, "scale_in_control", value) @property @pulumi.getter(name="scalingSchedules") def scaling_schedules(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. Up to 128 scaling schedules are allowed. """ return pulumi.get(self, "scaling_schedules") @scaling_schedules.setter def scaling_schedules(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "scaling_schedules", value) @pulumi.input_type class BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs: def __init__(__self__, *, header_name: Optional[pulumi.Input[str]] = None): """ Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting. :param pulumi.Input[str] header_name: The header field name to match on when bypassing cache. Values are case-insensitive. 
""" if header_name is not None: pulumi.set(__self__, "header_name", header_name) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The header field name to match on when bypassing cache. Values are case-insensitive. """ return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @pulumi.input_type class BackendBucketCdnPolicyCacheKeyPolicyArgs: def __init__(__self__, *, include_http_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, query_string_whitelist: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Message containing what to include in the cache key for a request for Cloud CDN. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_http_headers: Allows HTTP request headers (by name) to be used in the cache key. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_whitelist: Names of query string parameters to include in cache keys. All other parameters will be excluded. '&' and '=' will be percent encoded and not treated as delimiters. """ if include_http_headers is not None: pulumi.set(__self__, "include_http_headers", include_http_headers) if query_string_whitelist is not None: pulumi.set(__self__, "query_string_whitelist", query_string_whitelist) @property @pulumi.getter(name="includeHttpHeaders") def include_http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Allows HTTP request headers (by name) to be used in the cache key. """ return pulumi.get(self, "include_http_headers") @include_http_headers.setter def include_http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "include_http_headers", value) @property @pulumi.getter(name="queryStringWhitelist") def query_string_whitelist(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of query string parameters to include in cache keys. All other parameters will be excluded. '&' and '=' will be percent encoded and not treated as delimiters. """ return pulumi.get(self, "query_string_whitelist") @query_string_whitelist.setter def query_string_whitelist(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "query_string_whitelist", value) @pulumi.input_type class BackendBucketCdnPolicyNegativeCachingPolicyArgs: def __init__(__self__, *, code: Optional[pulumi.Input[int]] = None, ttl: Optional[pulumi.Input[int]] = None): """ Specify CDN TTLs for response error codes. :param pulumi.Input[int] code: The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once. :param pulumi.Input[int] ttl: The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ if code is not None: pulumi.set(__self__, "code", code) if ttl is not None: pulumi.set(__self__, "ttl", ttl) @property @pulumi.getter def code(self) -> Optional[pulumi.Input[int]]: """ The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once. 
""" return pulumi.get(self, "code") @code.setter def code(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "code", value) @property @pulumi.getter def ttl(self) -> Optional[pulumi.Input[int]]: """ The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "ttl") @ttl.setter def ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "ttl", value) @pulumi.input_type class BackendBucketCdnPolicyArgs: def __init__(__self__, *, bypass_cache_on_request_headers: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]]] = None, cache_key_policy: Optional[pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs']] = None, cache_mode: Optional[pulumi.Input['BackendBucketCdnPolicyCacheMode']] = None, client_ttl: Optional[pulumi.Input[int]] = None, default_ttl: Optional[pulumi.Input[int]] = None, max_ttl: Optional[pulumi.Input[int]] = None, negative_caching: Optional[pulumi.Input[bool]] = None, negative_caching_policy: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]]] = None, request_coalescing: Optional[pulumi.Input[bool]] = None, serve_while_stale: Optional[pulumi.Input[int]] = None, signed_url_cache_max_age_sec: Optional[pulumi.Input[str]] = None): """ Message containing Cloud CDN configuration for a backend bucket. :param pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]] bypass_cache_on_request_headers: Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. :param pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs'] cache_key_policy: The CacheKeyPolicy for this CdnPolicy. :param pulumi.Input['BackendBucketCdnPolicyCacheMode'] cache_mode: Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. :param pulumi.Input[int] client_ttl: Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. 
If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).
        :param pulumi.Input[int] default_ttl: Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
        :param pulumi.Input[int] max_ttl: Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
        :param pulumi.Input[bool] negative_caching: Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m; HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s; HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
        :param pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]] negative_caching_policy: Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
        :param pulumi.Input[bool] request_coalescing: If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
        :param pulumi.Input[int] serve_while_stale: Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served.
The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. :param pulumi.Input[str] signed_url_cache_max_age_sec: Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. """ if bypass_cache_on_request_headers is not None: pulumi.set(__self__, "bypass_cache_on_request_headers", bypass_cache_on_request_headers) if cache_key_policy is not None: pulumi.set(__self__, "cache_key_policy", cache_key_policy) if cache_mode is not None: pulumi.set(__self__, "cache_mode", cache_mode) if client_ttl is not None: pulumi.set(__self__, "client_ttl", client_ttl) if default_ttl is not None: pulumi.set(__self__, "default_ttl", default_ttl) if max_ttl is not None: pulumi.set(__self__, "max_ttl", max_ttl) if negative_caching is not None: pulumi.set(__self__, "negative_caching", negative_caching) if negative_caching_policy is not None: pulumi.set(__self__, "negative_caching_policy", negative_caching_policy) if request_coalescing is not None: pulumi.set(__self__, "request_coalescing", request_coalescing) if serve_while_stale is not None: pulumi.set(__self__, "serve_while_stale", serve_while_stale) if signed_url_cache_max_age_sec is not None: pulumi.set(__self__, "signed_url_cache_max_age_sec", signed_url_cache_max_age_sec) @property @pulumi.getter(name="bypassCacheOnRequestHeaders") def bypass_cache_on_request_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]]]: """ Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. """ return pulumi.get(self, "bypass_cache_on_request_headers") @bypass_cache_on_request_headers.setter def bypass_cache_on_request_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]]]): pulumi.set(self, "bypass_cache_on_request_headers", value) @property @pulumi.getter(name="cacheKeyPolicy") def cache_key_policy(self) -> Optional[pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs']]: """ The CacheKeyPolicy for this CdnPolicy. """ return pulumi.get(self, "cache_key_policy") @cache_key_policy.setter def cache_key_policy(self, value: Optional[pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs']]): pulumi.set(self, "cache_key_policy", value) @property @pulumi.getter(name="cacheMode") def cache_mode(self) -> Optional[pulumi.Input['BackendBucketCdnPolicyCacheMode']]: """ Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. 
FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. """ return pulumi.get(self, "cache_mode") @cache_mode.setter def cache_mode(self, value: Optional[pulumi.Input['BackendBucketCdnPolicyCacheMode']]): pulumi.set(self, "cache_mode", value) @property @pulumi.getter(name="clientTtl") def client_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). """ return pulumi.get(self, "client_ttl") @client_ttl.setter def client_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "client_ttl", value) @property @pulumi.getter(name="defaultTtl") def default_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "default_ttl") @default_ttl.setter def default_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "default_ttl", value) @property @pulumi.getter(name="maxTtl") def max_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "max_ttl") @max_ttl.setter def max_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ttl", value) @property @pulumi.getter(name="negativeCaching") def negative_caching(self) -> Optional[pulumi.Input[bool]]: """ Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. 
When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m; HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s; HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
        """
        return pulumi.get(self, "negative_caching")

    @negative_caching.setter
    def negative_caching(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "negative_caching", value)

    @property
    @pulumi.getter(name="negativeCachingPolicy")
    def negative_caching_policy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]]]:
        """
        Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
        """
        return pulumi.get(self, "negative_caching_policy")

    @negative_caching_policy.setter
    def negative_caching_policy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]]]):
        pulumi.set(self, "negative_caching_policy", value)

    @property
    @pulumi.getter(name="requestCoalescing")
    def request_coalescing(self) -> Optional[pulumi.Input[bool]]:
        """
        If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
        """
        return pulumi.get(self, "request_coalescing")

    @request_coalescing.setter
    def request_coalescing(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "request_coalescing", value)

    @property
    @pulumi.getter(name="serveWhileStale")
    def serve_while_stale(self) -> Optional[pulumi.Input[int]]:
        """
        Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.
        """
        return pulumi.get(self, "serve_while_stale")

    @serve_while_stale.setter
    def serve_while_stale(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "serve_while_stale", value)

    @property
    @pulumi.getter(name="signedUrlCacheMaxAgeSec")
    def signed_url_cache_max_age_sec(self) -> Optional[pulumi.Input[str]]:
        """
        Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s).
When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.
        """
        return pulumi.get(self, "signed_url_cache_max_age_sec")

    @signed_url_cache_max_age_sec.setter
    def signed_url_cache_max_age_sec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "signed_url_cache_max_age_sec", value)


@pulumi.input_type
class BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs:
    def __init__(__self__, *,
                 header_name: Optional[pulumi.Input[str]] = None):
        """
        Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case-insensitive. The presence of such a header overrides the cache_mode setting.
        :param pulumi.Input[str] header_name: The header field name to match on when bypassing cache. Values are case-insensitive.
        """
        if header_name is not None:
            pulumi.set(__self__, "header_name", header_name)

    @property
    @pulumi.getter(name="headerName")
    def header_name(self) -> Optional[pulumi.Input[str]]:
        """
        The header field name to match on when bypassing cache. Values are case-insensitive.
        """
        return pulumi.get(self, "header_name")

    @header_name.setter
    def header_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "header_name", value)


@pulumi.input_type
class BackendServiceCdnPolicyNegativeCachingPolicyArgs:
    def __init__(__self__, *,
                 code: Optional[pulumi.Input[int]] = None,
                 ttl: Optional[pulumi.Input[int]] = None):
        """
        Specify CDN TTLs for response error codes.
        :param pulumi.Input[int] code: The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once.
        :param pulumi.Input[int] ttl: The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
        """
        if code is not None:
            pulumi.set(__self__, "code", code)
        if ttl is not None:
            pulumi.set(__self__, "ttl", ttl)

    @property
    @pulumi.getter
    def code(self) -> Optional[pulumi.Input[int]]:
        """
        The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once.
        """
        return pulumi.get(self, "code")

    @code.setter
    def code(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "code", value)

    @property
    @pulumi.getter
    def ttl(self) -> Optional[pulumi.Input[int]]:
        """
        The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
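        Illustrative pairing (assumed values): code=301 with ttl=600 caches permanent redirects for ten minutes, mirroring the documented 10m default.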
""" return pulumi.get(self, "ttl") @ttl.setter def ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "ttl", value) @pulumi.input_type class BackendServiceCdnPolicyArgs: def __init__(__self__, *, bypass_cache_on_request_headers: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]]] = None, cache_key_policy: Optional[pulumi.Input['CacheKeyPolicyArgs']] = None, cache_mode: Optional[pulumi.Input['BackendServiceCdnPolicyCacheMode']] = None, client_ttl: Optional[pulumi.Input[int]] = None, default_ttl: Optional[pulumi.Input[int]] = None, max_ttl: Optional[pulumi.Input[int]] = None, negative_caching: Optional[pulumi.Input[bool]] = None, negative_caching_policy: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]]] = None, request_coalescing: Optional[pulumi.Input[bool]] = None, serve_while_stale: Optional[pulumi.Input[int]] = None, signed_url_cache_max_age_sec: Optional[pulumi.Input[str]] = None): """ Message containing Cloud CDN configuration for a backend service. :param pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]] bypass_cache_on_request_headers: Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. :param pulumi.Input['CacheKeyPolicyArgs'] cache_key_policy: The CacheKeyPolicy for this CdnPolicy. :param pulumi.Input['BackendServiceCdnPolicyCacheMode'] cache_mode: Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. :param pulumi.Input[int] client_ttl: Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). :param pulumi.Input[int] default_ttl: Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. 
When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
        :param pulumi.Input[int] max_ttl: Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
        :param pulumi.Input[bool] negative_caching: Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m; HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s; HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.
        :param pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]] negative_caching_policy: Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.
        :param pulumi.Input[bool] request_coalescing: If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
        :param pulumi.Input[int] serve_while_stale: Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.
        :param pulumi.Input[str] signed_url_cache_max_age_sec: Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s).
When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. """ if bypass_cache_on_request_headers is not None: pulumi.set(__self__, "bypass_cache_on_request_headers", bypass_cache_on_request_headers) if cache_key_policy is not None: pulumi.set(__self__, "cache_key_policy", cache_key_policy) if cache_mode is not None: pulumi.set(__self__, "cache_mode", cache_mode) if client_ttl is not None: pulumi.set(__self__, "client_ttl", client_ttl) if default_ttl is not None: pulumi.set(__self__, "default_ttl", default_ttl) if max_ttl is not None: pulumi.set(__self__, "max_ttl", max_ttl) if negative_caching is not None: pulumi.set(__self__, "negative_caching", negative_caching) if negative_caching_policy is not None: pulumi.set(__self__, "negative_caching_policy", negative_caching_policy) if request_coalescing is not None: pulumi.set(__self__, "request_coalescing", request_coalescing) if serve_while_stale is not None: pulumi.set(__self__, "serve_while_stale", serve_while_stale) if signed_url_cache_max_age_sec is not None: pulumi.set(__self__, "signed_url_cache_max_age_sec", signed_url_cache_max_age_sec) @property @pulumi.getter(name="bypassCacheOnRequestHeaders") def bypass_cache_on_request_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]]]: """ Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. """ return pulumi.get(self, "bypass_cache_on_request_headers") @bypass_cache_on_request_headers.setter def bypass_cache_on_request_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]]]): pulumi.set(self, "bypass_cache_on_request_headers", value) @property @pulumi.getter(name="cacheKeyPolicy") def cache_key_policy(self) -> Optional[pulumi.Input['CacheKeyPolicyArgs']]: """ The CacheKeyPolicy for this CdnPolicy. """ return pulumi.get(self, "cache_key_policy") @cache_key_policy.setter def cache_key_policy(self, value: Optional[pulumi.Input['CacheKeyPolicyArgs']]): pulumi.set(self, "cache_key_policy", value) @property @pulumi.getter(name="cacheMode") def cache_mode(self) -> Optional[pulumi.Input['BackendServiceCdnPolicyCacheMode']]: """ Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. 
""" return pulumi.get(self, "cache_mode") @cache_mode.setter def cache_mode(self, value: Optional[pulumi.Input['BackendServiceCdnPolicyCacheMode']]): pulumi.set(self, "cache_mode", value) @property @pulumi.getter(name="clientTtl") def client_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). """ return pulumi.get(self, "client_ttl") @client_ttl.setter def client_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "client_ttl", value) @property @pulumi.getter(name="defaultTtl") def default_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "default_ttl") @default_ttl.setter def default_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "default_ttl", value) @property @pulumi.getter(name="maxTtl") def max_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "max_ttl") @max_ttl.setter def max_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ttl", value) @property @pulumi.getter(name="negativeCaching") def negative_caching(self) -> Optional[pulumi.Input[bool]]: """ Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. 
By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. """ return pulumi.get(self, "negative_caching") @negative_caching.setter def negative_caching(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "negative_caching", value) @property @pulumi.getter(name="negativeCachingPolicy") def negative_caching_policy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]]]: """ Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. """ return pulumi.get(self, "negative_caching_policy") @negative_caching_policy.setter def negative_caching_policy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]]]): pulumi.set(self, "negative_caching_policy", value) @property @pulumi.getter(name="requestCoalescing") def request_coalescing(self) -> Optional[pulumi.Input[bool]]: """ If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. """ return pulumi.get(self, "request_coalescing") @request_coalescing.setter def request_coalescing(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "request_coalescing", value) @property @pulumi.getter(name="serveWhileStale") def serve_while_stale(self) -> Optional[pulumi.Input[int]]: """ Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. """ return pulumi.get(self, "serve_while_stale") @serve_while_stale.setter def serve_while_stale(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "serve_while_stale", value) @property @pulumi.getter(name="signedUrlCacheMaxAgeSec") def signed_url_cache_max_age_sec(self) -> Optional[pulumi.Input[str]]: """ Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. 
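
        A minimal illustrative sketch (the value is hypothetical; note this field is typed as a string)::

            cdn_policy = BackendServiceCdnPolicyArgs(
                signed_url_cache_max_age_sec='7200',  # signed URL responses stay fresh for 2 hours
            )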
""" return pulumi.get(self, "signed_url_cache_max_age_sec") @signed_url_cache_max_age_sec.setter def signed_url_cache_max_age_sec(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "signed_url_cache_max_age_sec", value) @pulumi.input_type class BackendServiceConnectionTrackingPolicyArgs: def __init__(__self__, *, connection_persistence_on_unhealthy_backends: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends']] = None, enable_strong_affinity: Optional[pulumi.Input[bool]] = None, idle_timeout_sec: Optional[pulumi.Input[int]] = None, tracking_mode: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode']] = None): """ Connection Tracking configuration for this BackendService. :param pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends'] connection_persistence_on_unhealthy_backends: Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence). :param pulumi.Input[bool] enable_strong_affinity: Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. :param pulumi.Input[int] idle_timeout_sec: Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly. :param pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode'] tracking_mode: Specifies the key used for connection tracking. There are two options: - PER_CONNECTION: This is the default mode. The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol. - PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity. For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode). 
""" if connection_persistence_on_unhealthy_backends is not None: pulumi.set(__self__, "connection_persistence_on_unhealthy_backends", connection_persistence_on_unhealthy_backends) if enable_strong_affinity is not None: pulumi.set(__self__, "enable_strong_affinity", enable_strong_affinity) if idle_timeout_sec is not None: pulumi.set(__self__, "idle_timeout_sec", idle_timeout_sec) if tracking_mode is not None: pulumi.set(__self__, "tracking_mode", tracking_mode) @property @pulumi.getter(name="connectionPersistenceOnUnhealthyBackends") def connection_persistence_on_unhealthy_backends(self) -> Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends']]: """ Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence). """ return pulumi.get(self, "connection_persistence_on_unhealthy_backends") @connection_persistence_on_unhealthy_backends.setter def connection_persistence_on_unhealthy_backends(self, value: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends']]): pulumi.set(self, "connection_persistence_on_unhealthy_backends", value) @property @pulumi.getter(name="enableStrongAffinity") def enable_strong_affinity(self) -> Optional[pulumi.Input[bool]]: """ Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. """ return pulumi.get(self, "enable_strong_affinity") @enable_strong_affinity.setter def enable_strong_affinity(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_strong_affinity", value) @property @pulumi.getter(name="idleTimeoutSec") def idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly. 
""" return pulumi.get(self, "idle_timeout_sec") @idle_timeout_sec.setter def idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "idle_timeout_sec", value) @property @pulumi.getter(name="trackingMode") def tracking_mode(self) -> Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode']]: """ Specifies the key used for connection tracking. There are two options: - PER_CONNECTION: This is the default mode. The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol. - PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity. For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode). """ return pulumi.get(self, "tracking_mode") @tracking_mode.setter def tracking_mode(self, value: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode']]): pulumi.set(self, "tracking_mode", value) @pulumi.input_type class BackendServiceFailoverPolicyArgs: def __init__(__self__, *, disable_connection_drain_on_failover: Optional[pulumi.Input[bool]] = None, drop_traffic_if_unhealthy: Optional[pulumi.Input[bool]] = None, failover_ratio: Optional[pulumi.Input[float]] = None): """ For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes). :param pulumi.Input[bool] disable_connection_drain_on_failover: This can be set to true only if the protocol is TCP. The default is false. :param pulumi.Input[bool] drop_traffic_if_unhealthy: If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false. :param pulumi.Input[float] failover_ratio: The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). """ if disable_connection_drain_on_failover is not None: pulumi.set(__self__, "disable_connection_drain_on_failover", disable_connection_drain_on_failover) if drop_traffic_if_unhealthy is not None: pulumi.set(__self__, "drop_traffic_if_unhealthy", drop_traffic_if_unhealthy) if failover_ratio is not None: pulumi.set(__self__, "failover_ratio", failover_ratio) @property @pulumi.getter(name="disableConnectionDrainOnFailover") def disable_connection_drain_on_failover(self) -> Optional[pulumi.Input[bool]]: """ This can be set to true only if the protocol is TCP. The default is false. """ return pulumi.get(self, "disable_connection_drain_on_failover") @disable_connection_drain_on_failover.setter def disable_connection_drain_on_failover(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disable_connection_drain_on_failover", value) @property @pulumi.getter(name="dropTrafficIfUnhealthy") def drop_traffic_if_unhealthy(self) -> Optional[pulumi.Input[bool]]: """ If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false. """ return pulumi.get(self, "drop_traffic_if_unhealthy") @drop_traffic_if_unhealthy.setter def drop_traffic_if_unhealthy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "drop_traffic_if_unhealthy", value) @property @pulumi.getter(name="failoverRatio") def failover_ratio(self) -> Optional[pulumi.Input[float]]: """ The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). """ return pulumi.get(self, "failover_ratio") @failover_ratio.setter def failover_ratio(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "failover_ratio", value) @pulumi.input_type class BackendServiceIAPOAuth2ClientInfoArgs: def __init__(__self__, *, application_name: Optional[pulumi.Input[str]] = None, client_name: Optional[pulumi.Input[str]] = None, developer_email_address: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] application_name: Application name to be used in OAuth consent screen. :param pulumi.Input[str] client_name: Name of the client to be generated. Optional - If not provided, the name will be autogenerated by the backend. :param pulumi.Input[str] developer_email_address: Developer's information to be used in OAuth consent screen. 
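
        A minimal illustrative sketch (all field values below are hypothetical)::

            client_info = BackendServiceIAPOAuth2ClientInfoArgs(
                application_name='my-app',
                developer_email_address='[email protected]',
            )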
""" if application_name is not None: pulumi.set(__self__, "application_name", application_name) if client_name is not None: pulumi.set(__self__, "client_name", client_name) if developer_email_address is not None: pulumi.set(__self__, "developer_email_address", developer_email_address) @property @pulumi.getter(name="applicationName") def application_name(self) -> Optional[pulumi.Input[str]]: """ Application name to be used in OAuth consent screen. """ return pulumi.get(self, "application_name") @application_name.setter def application_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "application_name", value) @property @pulumi.getter(name="clientName") def client_name(self) -> Optional[pulumi.Input[str]]: """ Name of the client to be generated. Optional - If not provided, the name will be autogenerated by the backend. """ return pulumi.get(self, "client_name") @client_name.setter def client_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "client_name", value) @property @pulumi.getter(name="developerEmailAddress") def developer_email_address(self) -> Optional[pulumi.Input[str]]: """ Developer's information to be used in OAuth consent screen. """ return pulumi.get(self, "developer_email_address") @developer_email_address.setter def developer_email_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "developer_email_address", value) @pulumi.input_type class BackendServiceIAPArgs: def __init__(__self__, *, enabled: Optional[pulumi.Input[bool]] = None, oauth2_client_id: Optional[pulumi.Input[str]] = None, oauth2_client_info: Optional[pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs']] = None, oauth2_client_secret: Optional[pulumi.Input[str]] = None): """ Identity-Aware Proxy :param pulumi.Input[bool] enabled: Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty. :param pulumi.Input[str] oauth2_client_id: OAuth2 client ID to use for the authentication flow. :param pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs'] oauth2_client_info: [Input Only] OAuth client info required to generate client id to be used for IAP. :param pulumi.Input[str] oauth2_client_secret: OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. @InputOnly """ if enabled is not None: pulumi.set(__self__, "enabled", enabled) if oauth2_client_id is not None: pulumi.set(__self__, "oauth2_client_id", oauth2_client_id) if oauth2_client_info is not None: pulumi.set(__self__, "oauth2_client_info", oauth2_client_info) if oauth2_client_secret is not None: pulumi.set(__self__, "oauth2_client_secret", oauth2_client_secret) @property @pulumi.getter def enabled(self) -> Optional[pulumi.Input[bool]]: """ Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty. """ return pulumi.get(self, "enabled") @enabled.setter def enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enabled", value) @property @pulumi.getter(name="oauth2ClientId") def oauth2_client_id(self) -> Optional[pulumi.Input[str]]: """ OAuth2 client ID to use for the authentication flow. 
""" return pulumi.get(self, "oauth2_client_id") @oauth2_client_id.setter def oauth2_client_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "oauth2_client_id", value) @property @pulumi.getter(name="oauth2ClientInfo") def oauth2_client_info(self) -> Optional[pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs']]: """ [Input Only] OAuth client info required to generate client id to be used for IAP. """ return pulumi.get(self, "oauth2_client_info") @oauth2_client_info.setter def oauth2_client_info(self, value: Optional[pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs']]): pulumi.set(self, "oauth2_client_info", value) @property @pulumi.getter(name="oauth2ClientSecret") def oauth2_client_secret(self) -> Optional[pulumi.Input[str]]: """ OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. @InputOnly """ return pulumi.get(self, "oauth2_client_secret") @oauth2_client_secret.setter def oauth2_client_secret(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "oauth2_client_secret", value) @pulumi.input_type class BackendServiceLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, sample_rate: Optional[pulumi.Input[float]] = None): """ The available logging options for the load balancer traffic served by this backend service. :param pulumi.Input[bool] enable: This field denotes whether to enable logging for the load balancer traffic served by this backend service. :param pulumi.Input[float] sample_rate: This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0. """ if enable is not None: pulumi.set(__self__, "enable", enable) if sample_rate is not None: pulumi.set(__self__, "sample_rate", sample_rate) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ This field denotes whether to enable logging for the load balancer traffic served by this backend service. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="sampleRate") def sample_rate(self) -> Optional[pulumi.Input[float]]: """ This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0. 
""" return pulumi.get(self, "sample_rate") @sample_rate.setter def sample_rate(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "sample_rate", value) @pulumi.input_type class BackendArgs: def __init__(__self__, *, balancing_mode: Optional[pulumi.Input['BackendBalancingMode']] = None, capacity_scaler: Optional[pulumi.Input[float]] = None, description: Optional[pulumi.Input[str]] = None, failover: Optional[pulumi.Input[bool]] = None, group: Optional[pulumi.Input[str]] = None, max_connections: Optional[pulumi.Input[int]] = None, max_connections_per_endpoint: Optional[pulumi.Input[int]] = None, max_connections_per_instance: Optional[pulumi.Input[int]] = None, max_rate: Optional[pulumi.Input[int]] = None, max_rate_per_endpoint: Optional[pulumi.Input[float]] = None, max_rate_per_instance: Optional[pulumi.Input[float]] = None, max_utilization: Optional[pulumi.Input[float]] = None): """ Message containing information of one individual backend. :param pulumi.Input['BackendBalancingMode'] balancing_mode: Specifies how to determine whether the backend of a load balancer can handle additional traffic or is fully loaded. For usage guidelines, see Connection balancing mode. Backends must use compatible balancing modes. For more information, see Supported balancing modes and target capacity settings and Restrictions and guidance for instance groups. Note: Currently, if you use the API to configure incompatible balancing modes, the configuration might be accepted even though it has no impact and is ignored. Specifically, Backend.maxUtilization is ignored when Backend.balancingMode is RATE. In the future, this incompatible combination will be rejected. :param pulumi.Input[float] capacity_scaler: A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[bool] failover: This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService. :param pulumi.Input[str] group: The fully-qualified URL of an instance group or network endpoint group (NEG) resource. To determine what types of backends a load balancer supports, see the [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service#backends). You must use the *fully-qualified* URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. Partial URLs are not supported. :param pulumi.Input[int] max_connections: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. :param pulumi.Input[int] max_connections_per_endpoint: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. 
:param pulumi.Input[int] max_connections_per_instance: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. :param pulumi.Input[int] max_rate: Defines a maximum number of HTTP requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_rate_per_endpoint: Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_rate_per_instance: Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_utilization: Optional parameter to define a target capacity for the UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode. """ if balancing_mode is not None: pulumi.set(__self__, "balancing_mode", balancing_mode) if capacity_scaler is not None: pulumi.set(__self__, "capacity_scaler", capacity_scaler) if description is not None: pulumi.set(__self__, "description", description) if failover is not None: pulumi.set(__self__, "failover", failover) if group is not None: pulumi.set(__self__, "group", group) if max_connections is not None: pulumi.set(__self__, "max_connections", max_connections) if max_connections_per_endpoint is not None: pulumi.set(__self__, "max_connections_per_endpoint", max_connections_per_endpoint) if max_connections_per_instance is not None: pulumi.set(__self__, "max_connections_per_instance", max_connections_per_instance) if max_rate is not None: pulumi.set(__self__, "max_rate", max_rate) if max_rate_per_endpoint is not None: pulumi.set(__self__, "max_rate_per_endpoint", max_rate_per_endpoint) if max_rate_per_instance is not None: pulumi.set(__self__, "max_rate_per_instance", max_rate_per_instance) if max_utilization is not None: pulumi.set(__self__, "max_utilization", max_utilization) @property @pulumi.getter(name="balancingMode") def balancing_mode(self) -> Optional[pulumi.Input['BackendBalancingMode']]: """ Specifies how to determine whether the backend of a load balancer can handle additional traffic or is fully loaded. For usage guidelines, see Connection balancing mode. Backends must use compatible balancing modes. For more information, see Supported balancing modes and target capacity settings and Restrictions and guidance for instance groups. Note: Currently, if you use the API to configure incompatible balancing modes, the configuration might be accepted even though it has no impact and is ignored. Specifically, Backend.maxUtilization is ignored when Backend.balancingMode is RATE. In the future, this incompatible combination will be rejected. """ return pulumi.get(self, "balancing_mode") @balancing_mode.setter def balancing_mode(self, value: Optional[pulumi.Input['BackendBalancingMode']]): pulumi.set(self, "balancing_mode", value) @property @pulumi.getter(name="capacityScaler") def capacity_scaler(self) -> Optional[pulumi.Input[float]]: """ A multiplier applied to the backend's target capacity of its balancing mode. 
The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. """ return pulumi.get(self, "capacity_scaler") @capacity_scaler.setter def capacity_scaler(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "capacity_scaler", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def failover(self) -> Optional[pulumi.Input[bool]]: """ This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService. """ return pulumi.get(self, "failover") @failover.setter def failover(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "failover", value) @property @pulumi.getter def group(self) -> Optional[pulumi.Input[str]]: """ The fully-qualified URL of an instance group or network endpoint group (NEG) resource. To determine what types of backends a load balancer supports, see the [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service#backends). You must use the *fully-qualified* URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. Partial URLs are not supported. """ return pulumi.get(self, "group") @group.setter def group(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "group", value) @property @pulumi.getter(name="maxConnections") def max_connections(self) -> Optional[pulumi.Input[int]]: """ Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. """ return pulumi.get(self, "max_connections") @max_connections.setter def max_connections(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections", value) @property @pulumi.getter(name="maxConnectionsPerEndpoint") def max_connections_per_endpoint(self) -> Optional[pulumi.Input[int]]: """ Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. """ return pulumi.get(self, "max_connections_per_endpoint") @max_connections_per_endpoint.setter def max_connections_per_endpoint(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections_per_endpoint", value) @property @pulumi.getter(name="maxConnectionsPerInstance") def max_connections_per_instance(self) -> Optional[pulumi.Input[int]]: """ Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. 
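
        A minimal illustrative sketch of a connection-balanced backend (the group URL and limit are hypothetical; the CONNECTION member is assumed from this module's BackendBalancingMode enum)::

            backend = BackendArgs(
                group='https://www.googleapis.com/compute/v1/projects/proj/zones/zone/instanceGroups/ig',
                balancing_mode=BackendBalancingMode.CONNECTION,
                max_connections_per_instance=100,
            )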
""" return pulumi.get(self, "max_connections_per_instance") @max_connections_per_instance.setter def max_connections_per_instance(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections_per_instance", value) @property @pulumi.getter(name="maxRate") def max_rate(self) -> Optional[pulumi.Input[int]]: """ Defines a maximum number of HTTP requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. """ return pulumi.get(self, "max_rate") @max_rate.setter def max_rate(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_rate", value) @property @pulumi.getter(name="maxRatePerEndpoint") def max_rate_per_endpoint(self) -> Optional[pulumi.Input[float]]: """ Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. """ return pulumi.get(self, "max_rate_per_endpoint") @max_rate_per_endpoint.setter def max_rate_per_endpoint(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_rate_per_endpoint", value) @property @pulumi.getter(name="maxRatePerInstance") def max_rate_per_instance(self) -> Optional[pulumi.Input[float]]: """ Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. """ return pulumi.get(self, "max_rate_per_instance") @max_rate_per_instance.setter def max_rate_per_instance(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_rate_per_instance", value) @property @pulumi.getter(name="maxUtilization") def max_utilization(self) -> Optional[pulumi.Input[float]]: """ Optional parameter to define a target capacity for the UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode. """ return pulumi.get(self, "max_utilization") @max_utilization.setter def max_utilization(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_utilization", value) @pulumi.input_type class BindingArgs: def __init__(__self__, *, binding_id: Optional[pulumi.Input[str]] = None, condition: Optional[pulumi.Input['ExprArgs']] = None, members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, role: Optional[pulumi.Input[str]] = None): """ Associates `members`, or principals, with a `role`. :param pulumi.Input[str] binding_id: This is deprecated and has no effect. Do not use. :param pulumi.Input['ExprArgs'] condition: The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). :param pulumi.Input[Sequence[pulumi.Input[str]]] members: Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. 
* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. :param pulumi.Input[str] role: Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. """ if binding_id is not None: pulumi.set(__self__, "binding_id", binding_id) if condition is not None: pulumi.set(__self__, "condition", condition) if members is not None: pulumi.set(__self__, "members", members) if role is not None: pulumi.set(__self__, "role", role) @property @pulumi.getter(name="bindingId") def binding_id(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "binding_id") @binding_id.setter def binding_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "binding_id", value) @property @pulumi.getter def condition(self) -> Optional[pulumi.Input['ExprArgs']]: """ The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). """ return pulumi.get(self, "condition") @condition.setter def condition(self, value: Optional[pulumi.Input['ExprArgs']]): pulumi.set(self, "condition", value) @property @pulumi.getter def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. 
* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. """ return pulumi.get(self, "members") @members.setter def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "members", value) @property @pulumi.getter def role(self) -> Optional[pulumi.Input[str]]: """ Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. """ return pulumi.get(self, "role") @role.setter def role(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "role", value) @pulumi.input_type class CacheKeyPolicyArgs: def __init__(__self__, *, include_host: Optional[pulumi.Input[bool]] = None, include_http_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, include_named_cookies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, include_protocol: Optional[pulumi.Input[bool]] = None, include_query_string: Optional[pulumi.Input[bool]] = None, query_string_blacklist: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, query_string_whitelist: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Message containing what to include in the cache key for a request for Cloud CDN. :param pulumi.Input[bool] include_host: If true, requests to different hosts will be cached separately. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_http_headers: Allows HTTP request headers (by name) to be used in the cache key. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_named_cookies: Allows HTTP cookies (by name) to be used in the cache key. The name=value pair will be used in the cache key Cloud CDN generates. :param pulumi.Input[bool] include_protocol: If true, http and https requests will be cached separately. :param pulumi.Input[bool] include_query_string: If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. 
If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_blacklist: Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_whitelist: Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. """ if include_host is not None: pulumi.set(__self__, "include_host", include_host) if include_http_headers is not None: pulumi.set(__self__, "include_http_headers", include_http_headers) if include_named_cookies is not None: pulumi.set(__self__, "include_named_cookies", include_named_cookies) if include_protocol is not None: pulumi.set(__self__, "include_protocol", include_protocol) if include_query_string is not None: pulumi.set(__self__, "include_query_string", include_query_string) if query_string_blacklist is not None: pulumi.set(__self__, "query_string_blacklist", query_string_blacklist) if query_string_whitelist is not None: pulumi.set(__self__, "query_string_whitelist", query_string_whitelist) @property @pulumi.getter(name="includeHost") def include_host(self) -> Optional[pulumi.Input[bool]]: """ If true, requests to different hosts will be cached separately. """ return pulumi.get(self, "include_host") @include_host.setter def include_host(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "include_host", value) @property @pulumi.getter(name="includeHttpHeaders") def include_http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Allows HTTP request headers (by name) to be used in the cache key. """ return pulumi.get(self, "include_http_headers") @include_http_headers.setter def include_http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "include_http_headers", value) @property @pulumi.getter(name="includeNamedCookies") def include_named_cookies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Allows HTTP cookies (by name) to be used in the cache key. The name=value pair will be used in the cache key Cloud CDN generates. """ return pulumi.get(self, "include_named_cookies") @include_named_cookies.setter def include_named_cookies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "include_named_cookies", value) @property @pulumi.getter(name="includeProtocol") def include_protocol(self) -> Optional[pulumi.Input[bool]]: """ If true, http and https requests will be cached separately. """ return pulumi.get(self, "include_protocol") @include_protocol.setter def include_protocol(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "include_protocol", value) @property @pulumi.getter(name="includeQueryString") def include_query_string(self) -> Optional[pulumi.Input[bool]]: """ If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely. 
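
        A minimal illustrative sketch (the whitelisted parameter names are hypothetical)::

            cache_key = CacheKeyPolicyArgs(
                include_query_string=True,
                query_string_whitelist=['page', 'lang'],  # only these parameters vary the cache key
            )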
""" return pulumi.get(self, "include_query_string") @include_query_string.setter def include_query_string(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "include_query_string", value) @property @pulumi.getter(name="queryStringBlacklist") def query_string_blacklist(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. """ return pulumi.get(self, "query_string_blacklist") @query_string_blacklist.setter def query_string_blacklist(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "query_string_blacklist", value) @property @pulumi.getter(name="queryStringWhitelist") def query_string_whitelist(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. """ return pulumi.get(self, "query_string_whitelist") @query_string_whitelist.setter def query_string_whitelist(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "query_string_whitelist", value) @pulumi.input_type class CallCredentialsArgs: def __init__(__self__, *, call_credential_type: Optional[pulumi.Input['CallCredentialsCallCredentialType']] = None, from_plugin: Optional[pulumi.Input['MetadataCredentialsFromPluginArgs']] = None): """ [Deprecated] gRPC call credentials to access the SDS server. gRPC call credentials to access the SDS server. :param pulumi.Input['CallCredentialsCallCredentialType'] call_credential_type: The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. :param pulumi.Input['MetadataCredentialsFromPluginArgs'] from_plugin: Custom authenticator credentials. Valid if callCredentialType is FROM_PLUGIN. """ if call_credential_type is not None: pulumi.set(__self__, "call_credential_type", call_credential_type) if from_plugin is not None: pulumi.set(__self__, "from_plugin", from_plugin) @property @pulumi.getter(name="callCredentialType") def call_credential_type(self) -> Optional[pulumi.Input['CallCredentialsCallCredentialType']]: """ The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. """ return pulumi.get(self, "call_credential_type") @call_credential_type.setter def call_credential_type(self, value: Optional[pulumi.Input['CallCredentialsCallCredentialType']]): pulumi.set(self, "call_credential_type", value) @property @pulumi.getter(name="fromPlugin") def from_plugin(self) -> Optional[pulumi.Input['MetadataCredentialsFromPluginArgs']]: """ Custom authenticator credentials. Valid if callCredentialType is FROM_PLUGIN. 
""" return pulumi.get(self, "from_plugin") @from_plugin.setter def from_plugin(self, value: Optional[pulumi.Input['MetadataCredentialsFromPluginArgs']]): pulumi.set(self, "from_plugin", value) @pulumi.input_type class ChannelCredentialsArgs: def __init__(__self__, *, certificates: Optional[pulumi.Input['TlsCertificatePathsArgs']] = None, channel_credential_type: Optional[pulumi.Input['ChannelCredentialsChannelCredentialType']] = None): """ [Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server. :param pulumi.Input['TlsCertificatePathsArgs'] certificates: The call credentials to access the SDS server. :param pulumi.Input['ChannelCredentialsChannelCredentialType'] channel_credential_type: The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. """ if certificates is not None: pulumi.set(__self__, "certificates", certificates) if channel_credential_type is not None: pulumi.set(__self__, "channel_credential_type", channel_credential_type) @property @pulumi.getter def certificates(self) -> Optional[pulumi.Input['TlsCertificatePathsArgs']]: """ The call credentials to access the SDS server. """ return pulumi.get(self, "certificates") @certificates.setter def certificates(self, value: Optional[pulumi.Input['TlsCertificatePathsArgs']]): pulumi.set(self, "certificates", value) @property @pulumi.getter(name="channelCredentialType") def channel_credential_type(self) -> Optional[pulumi.Input['ChannelCredentialsChannelCredentialType']]: """ The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. """ return pulumi.get(self, "channel_credential_type") @channel_credential_type.setter def channel_credential_type(self, value: Optional[pulumi.Input['ChannelCredentialsChannelCredentialType']]): pulumi.set(self, "channel_credential_type", value) @pulumi.input_type class CircuitBreakersArgs: def __init__(__self__, *, connect_timeout: Optional[pulumi.Input['DurationArgs']] = None, max_connections: Optional[pulumi.Input[int]] = None, max_pending_requests: Optional[pulumi.Input[int]] = None, max_requests: Optional[pulumi.Input[int]] = None, max_requests_per_connection: Optional[pulumi.Input[int]] = None, max_retries: Optional[pulumi.Input[int]] = None): """ Settings controlling the volume of requests, connections and retries to this backend service. :param pulumi.Input['DurationArgs'] connect_timeout: The timeout for new network connections to hosts. :param pulumi.Input[int] max_connections: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] max_pending_requests: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] max_requests: The maximum number of parallel requests that allowed to the backend service. If not specified, there is no limit. :param pulumi.Input[int] max_requests_per_connection: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. 
:param pulumi.Input[int] max_retries: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ if connect_timeout is not None: pulumi.set(__self__, "connect_timeout", connect_timeout) if max_connections is not None: pulumi.set(__self__, "max_connections", max_connections) if max_pending_requests is not None: pulumi.set(__self__, "max_pending_requests", max_pending_requests) if max_requests is not None: pulumi.set(__self__, "max_requests", max_requests) if max_requests_per_connection is not None: pulumi.set(__self__, "max_requests_per_connection", max_requests_per_connection) if max_retries is not None: pulumi.set(__self__, "max_retries", max_retries) @property @pulumi.getter(name="connectTimeout") def connect_timeout(self) -> Optional[pulumi.Input['DurationArgs']]: """ The timeout for new network connections to hosts. """ return pulumi.get(self, "connect_timeout") @connect_timeout.setter def connect_timeout(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "connect_timeout", value) @property @pulumi.getter(name="maxConnections") def max_connections(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "max_connections") @max_connections.setter def max_connections(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections", value) @property @pulumi.getter(name="maxPendingRequests") def max_pending_requests(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "max_pending_requests") @max_pending_requests.setter def max_pending_requests(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_pending_requests", value) @property @pulumi.getter(name="maxRequests") def max_requests(self) -> Optional[pulumi.Input[int]]: """ The maximum number of parallel requests that are allowed to the backend service. If not specified, there is no limit. """ return pulumi.get(self, "max_requests") @max_requests.setter def max_requests(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_requests", value) @property @pulumi.getter(name="maxRequestsPerConnection") def max_requests_per_connection(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "max_requests_per_connection") @max_requests_per_connection.setter def max_requests_per_connection(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_requests_per_connection", value) @property @pulumi.getter(name="maxRetries") def max_retries(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
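Example (a minimal sketch; the numeric limits are placeholders, and DurationArgs is the Duration input type defined later in this module):

    CircuitBreakersArgs(
        connect_timeout=DurationArgs(seconds="5"),
        max_requests=100,
        max_retries=3,
    )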
""" return pulumi.get(self, "max_retries") @max_retries.setter def max_retries(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_retries", value) @pulumi.input_type class ConditionArgs: def __init__(__self__, *, iam: Optional[pulumi.Input['ConditionIam']] = None, op: Optional[pulumi.Input['ConditionOp']] = None, svc: Optional[pulumi.Input[str]] = None, sys: Optional[pulumi.Input['ConditionSys']] = None, values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionIam'] iam: This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionOp'] op: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] svc: This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionSys'] sys: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: This is deprecated and has no effect. Do not use. """ if iam is not None: pulumi.set(__self__, "iam", iam) if op is not None: pulumi.set(__self__, "op", op) if svc is not None: pulumi.set(__self__, "svc", svc) if sys is not None: pulumi.set(__self__, "sys", sys) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def iam(self) -> Optional[pulumi.Input['ConditionIam']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "iam") @iam.setter def iam(self, value: Optional[pulumi.Input['ConditionIam']]): pulumi.set(self, "iam", value) @property @pulumi.getter def op(self) -> Optional[pulumi.Input['ConditionOp']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "op") @op.setter def op(self, value: Optional[pulumi.Input['ConditionOp']]): pulumi.set(self, "op", value) @property @pulumi.getter def svc(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "svc") @svc.setter def svc(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "svc", value) @property @pulumi.getter def sys(self) -> Optional[pulumi.Input['ConditionSys']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "sys") @sys.setter def sys(self, value: Optional[pulumi.Input['ConditionSys']]): pulumi.set(self, "sys", value) @property @pulumi.getter def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "values") @values.setter def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "values", value) @pulumi.input_type class ConfidentialInstanceConfigArgs: def __init__(__self__, *, enable_confidential_compute: Optional[pulumi.Input[bool]] = None): """ A set of Confidential Instance options. :param pulumi.Input[bool] enable_confidential_compute: Defines whether the instance should have confidential compute enabled. """ if enable_confidential_compute is not None: pulumi.set(__self__, "enable_confidential_compute", enable_confidential_compute) @property @pulumi.getter(name="enableConfidentialCompute") def enable_confidential_compute(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance should have confidential compute enabled. 
""" return pulumi.get(self, "enable_confidential_compute") @enable_confidential_compute.setter def enable_confidential_compute(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_confidential_compute", value) @pulumi.input_type class ConnectionDrainingArgs: def __init__(__self__, *, draining_timeout_sec: Optional[pulumi.Input[int]] = None): """ Message containing connection draining configuration. :param pulumi.Input[int] draining_timeout_sec: Configures a duration timeout for existing requests on a removed backend instance. For supported load balancers and protocols, as described in Enabling connection draining. """ if draining_timeout_sec is not None: pulumi.set(__self__, "draining_timeout_sec", draining_timeout_sec) @property @pulumi.getter(name="drainingTimeoutSec") def draining_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Configures a duration timeout for existing requests on a removed backend instance. For supported load balancers and protocols, as described in Enabling connection draining. """ return pulumi.get(self, "draining_timeout_sec") @draining_timeout_sec.setter def draining_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "draining_timeout_sec", value) @pulumi.input_type class ConsistentHashLoadBalancerSettingsHttpCookieArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, path: Optional[pulumi.Input[str]] = None, ttl: Optional[pulumi.Input['DurationArgs']] = None): """ The information about the HTTP Cookie on which the hash function is based for load balancing policies that use a consistent hash. :param pulumi.Input[str] name: Name of the cookie. :param pulumi.Input[str] path: Path to set for the cookie. :param pulumi.Input['DurationArgs'] ttl: Lifetime of the cookie. """ if name is not None: pulumi.set(__self__, "name", name) if path is not None: pulumi.set(__self__, "path", path) if ttl is not None: pulumi.set(__self__, "ttl", ttl) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the cookie. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def path(self) -> Optional[pulumi.Input[str]]: """ Path to set for the cookie. """ return pulumi.get(self, "path") @path.setter def path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path", value) @property @pulumi.getter def ttl(self) -> Optional[pulumi.Input['DurationArgs']]: """ Lifetime of the cookie. """ return pulumi.get(self, "ttl") @ttl.setter def ttl(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "ttl", value) @pulumi.input_type class ConsistentHashLoadBalancerSettingsArgs: def __init__(__self__, *, http_cookie: Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs']] = None, http_header_name: Optional[pulumi.Input[str]] = None, minimum_ring_size: Optional[pulumi.Input[str]] = None): """ This message defines settings for a consistent hash style load balancer. :param pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs'] http_cookie: Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. 
:param pulumi.Input[str] http_header_name: The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. :param pulumi.Input[str] minimum_ring_size: The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node. """ if http_cookie is not None: pulumi.set(__self__, "http_cookie", http_cookie) if http_header_name is not None: pulumi.set(__self__, "http_header_name", http_header_name) if minimum_ring_size is not None: pulumi.set(__self__, "minimum_ring_size", minimum_ring_size) @property @pulumi.getter(name="httpCookie") def http_cookie(self) -> Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs']]: """ Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "http_cookie") @http_cookie.setter def http_cookie(self, value: Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs']]): pulumi.set(self, "http_cookie", value) @property @pulumi.getter(name="httpHeaderName") def http_header_name(self) -> Optional[pulumi.Input[str]]: """ The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. """ return pulumi.get(self, "http_header_name") @http_header_name.setter def http_header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "http_header_name", value) @property @pulumi.getter(name="minimumRingSize") def minimum_ring_size(self) -> Optional[pulumi.Input[str]]: """ The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node. """ return pulumi.get(self, "minimum_ring_size") @minimum_ring_size.setter def minimum_ring_size(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "minimum_ring_size", value) @pulumi.input_type class CorsPolicyArgs: def __init__(__self__, *, allow_credentials: Optional[pulumi.Input[bool]] = None, allow_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, allow_methods: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, allow_origin_regexes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, allow_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, disabled: Optional[pulumi.Input[bool]] = None, expose_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, max_age: Optional[pulumi.Input[int]] = None): """ The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. :param pulumi.Input[bool] allow_credentials: In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. 
Default is false. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_headers: Specifies the content for the Access-Control-Allow-Headers header. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_methods: Specifies the content for the Access-Control-Allow-Methods header. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_origin_regexes: Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_origins: Specifies the list of origins that are allowed to make CORS requests. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. :param pulumi.Input[bool] disabled: If true, the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. :param pulumi.Input[Sequence[pulumi.Input[str]]] expose_headers: Specifies the content for the Access-Control-Expose-Headers header. :param pulumi.Input[int] max_age: Specifies how long results of a preflight request can be cached in seconds. This field translates to the Access-Control-Max-Age header. """ if allow_credentials is not None: pulumi.set(__self__, "allow_credentials", allow_credentials) if allow_headers is not None: pulumi.set(__self__, "allow_headers", allow_headers) if allow_methods is not None: pulumi.set(__self__, "allow_methods", allow_methods) if allow_origin_regexes is not None: pulumi.set(__self__, "allow_origin_regexes", allow_origin_regexes) if allow_origins is not None: pulumi.set(__self__, "allow_origins", allow_origins) if disabled is not None: pulumi.set(__self__, "disabled", disabled) if expose_headers is not None: pulumi.set(__self__, "expose_headers", expose_headers) if max_age is not None: pulumi.set(__self__, "max_age", max_age) @property @pulumi.getter(name="allowCredentials") def allow_credentials(self) -> Optional[pulumi.Input[bool]]: """ In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. Default is false. """ return pulumi.get(self, "allow_credentials") @allow_credentials.setter def allow_credentials(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "allow_credentials", value) @property @pulumi.getter(name="allowHeaders") def allow_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the content for the Access-Control-Allow-Headers header. """ return pulumi.get(self, "allow_headers") @allow_headers.setter def allow_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_headers", value) @property @pulumi.getter(name="allowMethods") def allow_methods(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the content for the Access-Control-Allow-Methods header. """ return pulumi.get(self, "allow_methods") @allow_methods.setter def allow_methods(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_methods", value) @property @pulumi.getter(name="allowOriginRegexes") def allow_origin_regexes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax.
An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. """ return pulumi.get(self, "allow_origin_regexes") @allow_origin_regexes.setter def allow_origin_regexes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_origin_regexes", value) @property @pulumi.getter(name="allowOrigins") def allow_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the list of origins that are allowed to make CORS requests. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. """ return pulumi.get(self, "allow_origins") @allow_origins.setter def allow_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_origins", value) @property @pulumi.getter def disabled(self) -> Optional[pulumi.Input[bool]]: """ If true, the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. """ return pulumi.get(self, "disabled") @disabled.setter def disabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disabled", value) @property @pulumi.getter(name="exposeHeaders") def expose_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the content for the Access-Control-Expose-Headers header. """ return pulumi.get(self, "expose_headers") @expose_headers.setter def expose_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "expose_headers", value) @property @pulumi.getter(name="maxAge") def max_age(self) -> Optional[pulumi.Input[int]]: """ Specifies how long results of a preflight request can be cached in seconds. This field translates to the Access-Control-Max-Age header. """ return pulumi.get(self, "max_age") @max_age.setter def max_age(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_age", value) @pulumi.input_type class CustomerEncryptionKeyArgs: def __init__(__self__, *, kms_key_name: Optional[pulumi.Input[str]] = None, kms_key_service_account: Optional[pulumi.Input[str]] = None, raw_key: Optional[pulumi.Input[str]] = None, rsa_encrypted_key: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] kms_key_name: The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key :param pulumi.Input[str] kms_key_service_account: The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ :param pulumi.Input[str] raw_key: Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" :param pulumi.Input[str] rsa_encrypted_key: Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey.
For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem """ if kms_key_name is not None: pulumi.set(__self__, "kms_key_name", kms_key_name) if kms_key_service_account is not None: pulumi.set(__self__, "kms_key_service_account", kms_key_service_account) if raw_key is not None: pulumi.set(__self__, "raw_key", raw_key) if rsa_encrypted_key is not None: pulumi.set(__self__, "rsa_encrypted_key", rsa_encrypted_key) @property @pulumi.getter(name="kmsKeyName") def kms_key_name(self) -> Optional[pulumi.Input[str]]: """ The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key """ return pulumi.get(self, "kms_key_name") @kms_key_name.setter def kms_key_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kms_key_name", value) @property @pulumi.getter(name="kmsKeyServiceAccount") def kms_key_service_account(self) -> Optional[pulumi.Input[str]]: """ The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ """ return pulumi.get(self, "kms_key_service_account") @kms_key_service_account.setter def kms_key_service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kms_key_service_account", value) @property @pulumi.getter(name="rawKey") def raw_key(self) -> Optional[pulumi.Input[str]]: """ Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" """ return pulumi.get(self, "raw_key") @raw_key.setter def raw_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "raw_key", value) @property @pulumi.getter(name="rsaEncryptedKey") def rsa_encrypted_key(self) -> Optional[pulumi.Input[str]]: """ Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. 
Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem """ return pulumi.get(self, "rsa_encrypted_key") @rsa_encrypted_key.setter def rsa_encrypted_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "rsa_encrypted_key", value) @pulumi.input_type class DeprecationStatusArgs: def __init__(__self__, *, deleted: Optional[pulumi.Input[str]] = None, deprecated: Optional[pulumi.Input[str]] = None, obsolete: Optional[pulumi.Input[str]] = None, replacement: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['DeprecationStatusState']] = None, state_override: Optional[pulumi.Input['RolloutPolicyArgs']] = None): """ Deprecation status for a public resource. :param pulumi.Input[str] deleted: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] deprecated: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] obsolete: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] replacement: The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. :param pulumi.Input['DeprecationStatusState'] state: The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. :param pulumi.Input['RolloutPolicyArgs'] state_override: The rollout policy for this deprecation. This policy is only enforced by image family views. The rollout policy restricts the zones where the associated resource is considered in a deprecated state. When the rollout policy does not include the user specified zone, or if the zone is rolled out, the associated resource is considered in a deprecated state. The rollout policy for this deprecation is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method. """ if deleted is not None: pulumi.set(__self__, "deleted", deleted) if deprecated is not None: pulumi.set(__self__, "deprecated", deprecated) if obsolete is not None: pulumi.set(__self__, "obsolete", obsolete) if replacement is not None: pulumi.set(__self__, "replacement", replacement) if state is not None: pulumi.set(__self__, "state", state) if state_override is not None: pulumi.set(__self__, "state_override", state_override) @property @pulumi.getter def deleted(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. 
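Example (a minimal sketch; the timestamp and replacement URL are placeholders, and the DeprecationStatusState enum from this package is assumed to be in scope):

    DeprecationStatusArgs(
        state=DeprecationStatusState.DEPRECATED,
        deprecated="2030-01-01T00:00:00Z",
        replacement="https://www.googleapis.com/compute/v1/projects/my-project/global/images/my-new-image",
    )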
""" return pulumi.get(self, "deleted") @deleted.setter def deleted(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deleted", value) @property @pulumi.getter def deprecated(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. """ return pulumi.get(self, "deprecated") @deprecated.setter def deprecated(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deprecated", value) @property @pulumi.getter def obsolete(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. """ return pulumi.get(self, "obsolete") @obsolete.setter def obsolete(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "obsolete", value) @property @pulumi.getter def replacement(self) -> Optional[pulumi.Input[str]]: """ The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. """ return pulumi.get(self, "replacement") @replacement.setter def replacement(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "replacement", value) @property @pulumi.getter def state(self) -> Optional[pulumi.Input['DeprecationStatusState']]: """ The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. """ return pulumi.get(self, "state") @state.setter def state(self, value: Optional[pulumi.Input['DeprecationStatusState']]): pulumi.set(self, "state", value) @property @pulumi.getter(name="stateOverride") def state_override(self) -> Optional[pulumi.Input['RolloutPolicyArgs']]: """ The rollout policy for this deprecation. This policy is only enforced by image family views. The rollout policy restricts the zones where the associated resource is considered in a deprecated state. When the rollout policy does not include the user specified zone, or if the zone is rolled out, the associated resource is considered in a deprecated state. The rollout policy for this deprecation is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method. """ return pulumi.get(self, "state_override") @state_override.setter def state_override(self, value: Optional[pulumi.Input['RolloutPolicyArgs']]): pulumi.set(self, "state_override", value) @pulumi.input_type class DiskInstantiationConfigArgs: def __init__(__self__, *, auto_delete: Optional[pulumi.Input[bool]] = None, custom_image: Optional[pulumi.Input[str]] = None, device_name: Optional[pulumi.Input[str]] = None, instantiate_from: Optional[pulumi.Input['DiskInstantiationConfigInstantiateFrom']] = None): """ A specification of the desired way to instantiate a disk in the instance template when its created from a source instance. 
:param pulumi.Input[bool] auto_delete: Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). :param pulumi.Input[str] custom_image: The custom source image to be used to restore this disk when instantiating this instance template. :param pulumi.Input[str] device_name: Specifies the device name of the disk to which the configurations apply. :param pulumi.Input['DiskInstantiationConfigInstantiateFrom'] instantiate_from: Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks. """ if auto_delete is not None: pulumi.set(__self__, "auto_delete", auto_delete) if custom_image is not None: pulumi.set(__self__, "custom_image", custom_image) if device_name is not None: pulumi.set(__self__, "device_name", device_name) if instantiate_from is not None: pulumi.set(__self__, "instantiate_from", instantiate_from) @property @pulumi.getter(name="autoDelete") def auto_delete(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). """ return pulumi.get(self, "auto_delete") @auto_delete.setter def auto_delete(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_delete", value) @property @pulumi.getter(name="customImage") def custom_image(self) -> Optional[pulumi.Input[str]]: """ The custom source image to be used to restore this disk when instantiating this instance template. """ return pulumi.get(self, "custom_image") @custom_image.setter def custom_image(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_image", value) @property @pulumi.getter(name="deviceName") def device_name(self) -> Optional[pulumi.Input[str]]: """ Specifies the device name of the disk to which the configurations apply. """ return pulumi.get(self, "device_name") @device_name.setter def device_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "device_name", value) @property @pulumi.getter(name="instantiateFrom") def instantiate_from(self) -> Optional[pulumi.Input['DiskInstantiationConfigInstantiateFrom']]: """ Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template.
Applicable to additional read-write disks, local SSDs, and read-only disks. """ return pulumi.get(self, "instantiate_from") @instantiate_from.setter def instantiate_from(self, value: Optional[pulumi.Input['DiskInstantiationConfigInstantiateFrom']]): pulumi.set(self, "instantiate_from", value) @pulumi.input_type class DisplayDeviceArgs: def __init__(__self__, *, enable_display: Optional[pulumi.Input[bool]] = None): """ A set of Display Device options :param pulumi.Input[bool] enable_display: Defines whether the instance has Display enabled. """ if enable_display is not None: pulumi.set(__self__, "enable_display", enable_display) @property @pulumi.getter(name="enableDisplay") def enable_display(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has Display enabled. """ return pulumi.get(self, "enable_display") @enable_display.setter def enable_display(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_display", value) @pulumi.input_type class DistributionPolicyZoneConfigurationArgs: def __init__(__self__, *, zone: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] zone: The URL of the zone. The zone must exist in the region where the managed instance group is located. """ if zone is not None: pulumi.set(__self__, "zone", zone) @property @pulumi.getter def zone(self) -> Optional[pulumi.Input[str]]: """ The URL of the zone. The zone must exist in the region where the managed instance group is located. """ return pulumi.get(self, "zone") @zone.setter def zone(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "zone", value) @pulumi.input_type class DistributionPolicyArgs: def __init__(__self__, *, target_shape: Optional[pulumi.Input['DistributionPolicyTargetShape']] = None, zones: Optional[pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]]] = None): """ :param pulumi.Input['DistributionPolicyTargetShape'] target_shape: The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType). :param pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]] zones: Zones where the regional managed instance group will create and manage its instances. """ if target_shape is not None: pulumi.set(__self__, "target_shape", target_shape) if zones is not None: pulumi.set(__self__, "zones", zones) @property @pulumi.getter(name="targetShape") def target_shape(self) -> Optional[pulumi.Input['DistributionPolicyTargetShape']]: """ The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType). """ return pulumi.get(self, "target_shape") @target_shape.setter def target_shape(self, value: Optional[pulumi.Input['DistributionPolicyTargetShape']]): pulumi.set(self, "target_shape", value) @property @pulumi.getter def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]]]: """ Zones where the regional managed instance group will create and manage its instances. 
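Example (a minimal sketch; the zone URL is a placeholder):

    DistributionPolicyArgs(
        zones=[
            DistributionPolicyZoneConfigurationArgs(
                zone="https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a",
            ),
        ],
    )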
""" return pulumi.get(self, "zones") @zones.setter def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]]]): pulumi.set(self, "zones", value) @pulumi.input_type class DurationArgs: def __init__(__self__, *, nanos: Optional[pulumi.Input[int]] = None, seconds: Optional[pulumi.Input[str]] = None): """ A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. :param pulumi.Input[int] nanos: Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. :param pulumi.Input[str] seconds: Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years """ if nanos is not None: pulumi.set(__self__, "nanos", nanos) if seconds is not None: pulumi.set(__self__, "seconds", seconds) @property @pulumi.getter def nanos(self) -> Optional[pulumi.Input[int]]: """ Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. """ return pulumi.get(self, "nanos") @nanos.setter def nanos(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "nanos", value) @property @pulumi.getter def seconds(self) -> Optional[pulumi.Input[str]]: """ Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years """ return pulumi.get(self, "seconds") @seconds.setter def seconds(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "seconds", value) @pulumi.input_type class ExprArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, expression: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None): """ Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. :param pulumi.Input[str] description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
:param pulumi.Input[str] expression: Textual representation of an expression in Common Expression Language syntax. :param pulumi.Input[str] location: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. :param pulumi.Input[str] title: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression. """ if description is not None: pulumi.set(__self__, "description", description) if expression is not None: pulumi.set(__self__, "expression", expression) if location is not None: pulumi.set(__self__, "location", location) if title is not None: pulumi.set(__self__, "title", title) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def expression(self) -> Optional[pulumi.Input[str]]: """ Textual representation of an expression in Common Expression Language syntax. """ return pulumi.get(self, "expression") @expression.setter def expression(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expression", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def title(self) -> Optional[pulumi.Input[str]]: """ Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression. """ return pulumi.get(self, "title") @title.setter def title(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "title", value) @pulumi.input_type class ExternalVpnGatewayInterfaceArgs: def __init__(__self__, *, id: Optional[pulumi.Input[int]] = None, ip_address: Optional[pulumi.Input[str]] = None): """ The interface for the external VPN gateway. :param pulumi.Input[int] id: The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 :param pulumi.Input[str] ip_address: IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. """ if id is not None: pulumi.set(__self__, "id", id) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[int]]: """ The numeric ID of this interface.
The allowed input values for this id for different redundancy types of external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 """ return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "id", value) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[pulumi.Input[str]]: """ IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_address", value) @pulumi.input_type class FileContentBufferArgs: def __init__(__self__, *, content: Optional[pulumi.Input[str]] = None, file_type: Optional[pulumi.Input['FileContentBufferFileType']] = None): """ :param pulumi.Input[str] content: The raw content in the secure keys file. :param pulumi.Input['FileContentBufferFileType'] file_type: The file type of source file. """ if content is not None: pulumi.set(__self__, "content", content) if file_type is not None: pulumi.set(__self__, "file_type", file_type) @property @pulumi.getter def content(self) -> Optional[pulumi.Input[str]]: """ The raw content in the secure keys file. """ return pulumi.get(self, "content") @content.setter def content(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "content", value) @property @pulumi.getter(name="fileType") def file_type(self) -> Optional[pulumi.Input['FileContentBufferFileType']]: """ The file type of source file. """ return pulumi.get(self, "file_type") @file_type.setter def file_type(self, value: Optional[pulumi.Input['FileContentBufferFileType']]): pulumi.set(self, "file_type", value) @pulumi.input_type class FirewallAllowedItemArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. 
This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class FirewallDeniedItemArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class FirewallLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, metadata: Optional[pulumi.Input['FirewallLogConfigMetadata']] = None): """ The available logging options for a firewall rule. :param pulumi.Input[bool] enable: This field denotes whether to enable logging for a particular firewall rule. :param pulumi.Input['FirewallLogConfigMetadata'] metadata: This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs. """ if enable is not None: pulumi.set(__self__, "enable", enable) if metadata is not None: pulumi.set(__self__, "metadata", metadata) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ This field denotes whether to enable logging for a particular firewall rule. 
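Example (a minimal sketch; the metadata member shown is an assumption, so check the FirewallLogConfigMetadata enum in this package for the exact values):

    FirewallLogConfigArgs(
        enable=True,
        metadata=FirewallLogConfigMetadata.INCLUDE_ALL_METADATA,
    )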
""" return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['FirewallLogConfigMetadata']]: """ This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['FirewallLogConfigMetadata']]): pulumi.set(self, "metadata", value) @pulumi.input_type class FirewallPolicyAssociationArgs: def __init__(__self__, *, attachment_target: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] attachment_target: The target that the firewall policy is attached to. :param pulumi.Input[str] name: The name for an association. """ if attachment_target is not None: pulumi.set(__self__, "attachment_target", attachment_target) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="attachmentTarget") def attachment_target(self) -> Optional[pulumi.Input[str]]: """ The target that the firewall policy is attached to. """ return pulumi.get(self, "attachment_target") @attachment_target.setter def attachment_target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "attachment_target", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for an association. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class FirewallPolicyRuleMatcherLayer4ConfigArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class FirewallPolicyRuleMatcherArgs: def __init__(__self__, *, dest_address_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_region_codes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, layer4_configs: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]]] = None, src_address_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_region_codes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_secure_tags: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]] = None): """ Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_address_groups: Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_fqdns: Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 1000. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_ip_ranges: CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_region_codes: Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]] layer4_configs: Pairs of IP protocols and ports that the rule should match. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_address_groups: Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_fqdns: Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 1000. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_ip_ranges: CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_region_codes: Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]] src_secure_tags: List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. 
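Example (a minimal sketch; the ranges are placeholders and the combination of fields shown is illustrative only):

    FirewallPolicyRuleMatcherArgs(
        src_ip_ranges=["10.0.0.0/8"],
        layer4_configs=[
            FirewallPolicyRuleMatcherLayer4ConfigArgs(
                ip_protocol="tcp",
                ports=["22", "80-443"],
            ),
        ],
    )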
""" if dest_address_groups is not None: pulumi.set(__self__, "dest_address_groups", dest_address_groups) if dest_fqdns is not None: pulumi.set(__self__, "dest_fqdns", dest_fqdns) if dest_ip_ranges is not None: pulumi.set(__self__, "dest_ip_ranges", dest_ip_ranges) if dest_region_codes is not None: pulumi.set(__self__, "dest_region_codes", dest_region_codes) if layer4_configs is not None: pulumi.set(__self__, "layer4_configs", layer4_configs) if src_address_groups is not None: pulumi.set(__self__, "src_address_groups", src_address_groups) if src_fqdns is not None: pulumi.set(__self__, "src_fqdns", src_fqdns) if src_ip_ranges is not None: pulumi.set(__self__, "src_ip_ranges", src_ip_ranges) if src_region_codes is not None: pulumi.set(__self__, "src_region_codes", src_region_codes) if src_secure_tags is not None: pulumi.set(__self__, "src_secure_tags", src_secure_tags) @property @pulumi.getter(name="destAddressGroups") def dest_address_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. """ return pulumi.get(self, "dest_address_groups") @dest_address_groups.setter def dest_address_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_address_groups", value) @property @pulumi.getter(name="destFqdns") def dest_fqdns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 1000. """ return pulumi.get(self, "dest_fqdns") @dest_fqdns.setter def dest_fqdns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_fqdns", value) @property @pulumi.getter(name="destIpRanges") def dest_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. """ return pulumi.get(self, "dest_ip_ranges") @dest_ip_ranges.setter def dest_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_ip_ranges", value) @property @pulumi.getter(name="destRegionCodes") def dest_region_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. """ return pulumi.get(self, "dest_region_codes") @dest_region_codes.setter def dest_region_codes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_region_codes", value) @property @pulumi.getter(name="layer4Configs") def layer4_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]]]: """ Pairs of IP protocols and ports that the rule should match. """ return pulumi.get(self, "layer4_configs") @layer4_configs.setter def layer4_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]]]): pulumi.set(self, "layer4_configs", value) @property @pulumi.getter(name="srcAddressGroups") def src_address_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. 
""" return pulumi.get(self, "src_address_groups") @src_address_groups.setter def src_address_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_address_groups", value) @property @pulumi.getter(name="srcFqdns") def src_fqdns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 1000. """ return pulumi.get(self, "src_fqdns") @src_fqdns.setter def src_fqdns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_fqdns", value) @property @pulumi.getter(name="srcIpRanges") def src_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. """ return pulumi.get(self, "src_ip_ranges") @src_ip_ranges.setter def src_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_ip_ranges", value) @property @pulumi.getter(name="srcRegionCodes") def src_region_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. """ return pulumi.get(self, "src_region_codes") @src_region_codes.setter def src_region_codes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_region_codes", value) @property @pulumi.getter(name="srcSecureTags") def src_secure_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]: """ List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. """ return pulumi.get(self, "src_secure_tags") @src_secure_tags.setter def src_secure_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]): pulumi.set(self, "src_secure_tags", value) @pulumi.input_type class FirewallPolicyRuleSecureTagArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: Name of the secure tag, created with TagManager's TagValue API. """ if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the secure tag, created with TagManager's TagValue API. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class FirewallPolicyRuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, direction: Optional[pulumi.Input['FirewallPolicyRuleDirection']] = None, disabled: Optional[pulumi.Input[bool]] = None, enable_logging: Optional[pulumi.Input[bool]] = None, match: Optional[pulumi.Input['FirewallPolicyRuleMatcherArgs']] = None, priority: Optional[pulumi.Input[int]] = None, target_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, target_secure_tags: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]] = None, target_service_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). :param pulumi.Input[str] action: The Action to perform when the client connection triggers the rule. Can currently be either "allow" or "deny()" where valid values for status are 403, 404, and 502. :param pulumi.Input[str] description: An optional description for this resource. :param pulumi.Input['FirewallPolicyRuleDirection'] direction: The direction in which this rule applies. :param pulumi.Input[bool] disabled: Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. :param pulumi.Input[bool] enable_logging: Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. :param pulumi.Input['FirewallPolicyRuleMatcherArgs'] match: A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. :param pulumi.Input[int] priority: An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_resources: A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]] target_secure_tags: A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_service_accounts: A list of service accounts indicating the sets of instances that are applied with this rule. 
""" if action is not None: pulumi.set(__self__, "action", action) if description is not None: pulumi.set(__self__, "description", description) if direction is not None: pulumi.set(__self__, "direction", direction) if disabled is not None: pulumi.set(__self__, "disabled", disabled) if enable_logging is not None: pulumi.set(__self__, "enable_logging", enable_logging) if match is not None: pulumi.set(__self__, "match", match) if priority is not None: pulumi.set(__self__, "priority", priority) if target_resources is not None: pulumi.set(__self__, "target_resources", target_resources) if target_secure_tags is not None: pulumi.set(__self__, "target_secure_tags", target_secure_tags) if target_service_accounts is not None: pulumi.set(__self__, "target_service_accounts", target_service_accounts) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: """ The Action to perform when the client connection triggers the rule. Can currently be either "allow" or "deny()" where valid values for status are 403, 404, and 502. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description for this resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def direction(self) -> Optional[pulumi.Input['FirewallPolicyRuleDirection']]: """ The direction in which this rule applies. """ return pulumi.get(self, "direction") @direction.setter def direction(self, value: Optional[pulumi.Input['FirewallPolicyRuleDirection']]): pulumi.set(self, "direction", value) @property @pulumi.getter def disabled(self) -> Optional[pulumi.Input[bool]]: """ Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. """ return pulumi.get(self, "disabled") @disabled.setter def disabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disabled", value) @property @pulumi.getter(name="enableLogging") def enable_logging(self) -> Optional[pulumi.Input[bool]]: """ Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. """ return pulumi.get(self, "enable_logging") @enable_logging.setter def enable_logging(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_logging", value) @property @pulumi.getter def match(self) -> Optional[pulumi.Input['FirewallPolicyRuleMatcherArgs']]: """ A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. """ return pulumi.get(self, "match") @match.setter def match(self, value: Optional[pulumi.Input['FirewallPolicyRuleMatcherArgs']]): pulumi.set(self, "match", value) @property @pulumi.getter def priority(self) -> Optional[pulumi.Input[int]]: """ An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. 
""" return pulumi.get(self, "priority") @priority.setter def priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "priority", value) @property @pulumi.getter(name="targetResources") def target_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. """ return pulumi.get(self, "target_resources") @target_resources.setter def target_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_resources", value) @property @pulumi.getter(name="targetSecureTags") def target_secure_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]: """ A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. """ return pulumi.get(self, "target_secure_tags") @target_secure_tags.setter def target_secure_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]): pulumi.set(self, "target_secure_tags", value) @property @pulumi.getter(name="targetServiceAccounts") def target_service_accounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of service accounts indicating the sets of instances that are applied with this rule. """ return pulumi.get(self, "target_service_accounts") @target_service_accounts.setter def target_service_accounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_service_accounts", value) @pulumi.input_type class FixedOrPercentArgs: def __init__(__self__, *, fixed: Optional[pulumi.Input[int]] = None, percent: Optional[pulumi.Input[int]] = None): """ Encapsulates numeric value that can be either absolute or relative. :param pulumi.Input[int] fixed: Specifies a fixed number of VM instances. This must be a positive integer. :param pulumi.Input[int] percent: Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. """ if fixed is not None: pulumi.set(__self__, "fixed", fixed) if percent is not None: pulumi.set(__self__, "percent", percent) @property @pulumi.getter def fixed(self) -> Optional[pulumi.Input[int]]: """ Specifies a fixed number of VM instances. This must be a positive integer. """ return pulumi.get(self, "fixed") @fixed.setter def fixed(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "fixed", value) @property @pulumi.getter def percent(self) -> Optional[pulumi.Input[int]]: """ Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. 
""" return pulumi.get(self, "percent") @percent.setter def percent(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "percent", value) @pulumi.input_type class ForwardingRuleServiceDirectoryRegistrationArgs: def __init__(__self__, *, namespace: Optional[pulumi.Input[str]] = None, service: Optional[pulumi.Input[str]] = None, service_directory_region: Optional[pulumi.Input[str]] = None): """ Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule. :param pulumi.Input[str] namespace: Service Directory namespace to register the forwarding rule under. :param pulumi.Input[str] service: Service Directory service to register the forwarding rule under. :param pulumi.Input[str] service_directory_region: [Optional] Service Directory region to register this global forwarding rule under. Default to "us-central1". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region. """ if namespace is not None: pulumi.set(__self__, "namespace", namespace) if service is not None: pulumi.set(__self__, "service", service) if service_directory_region is not None: pulumi.set(__self__, "service_directory_region", service_directory_region) @property @pulumi.getter def namespace(self) -> Optional[pulumi.Input[str]]: """ Service Directory namespace to register the forwarding rule under. """ return pulumi.get(self, "namespace") @namespace.setter def namespace(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "namespace", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Service Directory service to register the forwarding rule under. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="serviceDirectoryRegion") def service_directory_region(self) -> Optional[pulumi.Input[str]]: """ [Optional] Service Directory region to register this global forwarding rule under. Default to "us-central1". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region. """ return pulumi.get(self, "service_directory_region") @service_directory_region.setter def service_directory_region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service_directory_region", value) @pulumi.input_type class FutureReservationSpecificSKUPropertiesArgs: def __init__(__self__, *, instance_properties: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']] = None, total_count: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs'] instance_properties: Properties of the SKU instances being reserved. :param pulumi.Input[str] total_count: Total number of instances for which capacity assurance is requested at a future time period. """ if instance_properties is not None: pulumi.set(__self__, "instance_properties", instance_properties) if total_count is not None: pulumi.set(__self__, "total_count", total_count) @property @pulumi.getter(name="instanceProperties") def instance_properties(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]: """ Properties of the SKU instances being reserved. 
""" return pulumi.get(self, "instance_properties") @instance_properties.setter def instance_properties(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]): pulumi.set(self, "instance_properties", value) @property @pulumi.getter(name="totalCount") def total_count(self) -> Optional[pulumi.Input[str]]: """ Total number of instances for which capacity assurance is requested at a future time period. """ return pulumi.get(self, "total_count") @total_count.setter def total_count(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "total_count", value) @pulumi.input_type class FutureReservationTimeWindowArgs: def __init__(__self__, *, duration: Optional[pulumi.Input['DurationArgs']] = None, end_time: Optional[pulumi.Input[str]] = None, start_time: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] start_time: Start time of the Future Reservation. The start_time is an RFC3339 string. """ if duration is not None: pulumi.set(__self__, "duration", duration) if end_time is not None: pulumi.set(__self__, "end_time", end_time) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter def duration(self) -> Optional[pulumi.Input['DurationArgs']]: return pulumi.get(self, "duration") @duration.setter def duration(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "duration", value) @property @pulumi.getter(name="endTime") def end_time(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "end_time") @end_time.setter def end_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "end_time", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Start time of the Future Reservation. The start_time is an RFC3339 string. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class GRPCHealthCheckArgs: def __init__(__self__, *, grpc_service_name: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['GRPCHealthCheckPortSpecification']] = None): """ :param pulumi.Input[str] grpc_service_name: The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. The grpc_service_name can only be ASCII. :param pulumi.Input[int] port: The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035. :param pulumi.Input['GRPCHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields. """ if grpc_service_name is not None: pulumi.set(__self__, "grpc_service_name", grpc_service_name) if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if port_specification is not None: pulumi.set(__self__, "port_specification", port_specification) @property @pulumi.getter(name="grpcServiceName") def grpc_service_name(self) -> Optional[pulumi.Input[str]]: """ The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. The grpc_service_name can only be ASCII. """ return pulumi.get(self, "grpc_service_name") @grpc_service_name.setter def grpc_service_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "grpc_service_name", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter(name="portSpecification") def port_specification(self) -> Optional[pulumi.Input['GRPCHealthCheckPortSpecification']]: """ Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields. """ return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['GRPCHealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @pulumi.input_type class GrpcServiceConfigArgs: def __init__(__self__, *, call_credentials: Optional[pulumi.Input['CallCredentialsArgs']] = None, channel_credentials: Optional[pulumi.Input['ChannelCredentialsArgs']] = None, target_uri: Optional[pulumi.Input[str]] = None): """ [Deprecated] gRPC config to access the SDS server. gRPC config to access the SDS server. :param pulumi.Input['CallCredentialsArgs'] call_credentials: The call credentials to access the SDS server. :param pulumi.Input['ChannelCredentialsArgs'] channel_credentials: The channel credentials to access the SDS server. :param pulumi.Input[str] target_uri: The target URI of the SDS server. 
""" if call_credentials is not None: pulumi.set(__self__, "call_credentials", call_credentials) if channel_credentials is not None: pulumi.set(__self__, "channel_credentials", channel_credentials) if target_uri is not None: pulumi.set(__self__, "target_uri", target_uri) @property @pulumi.getter(name="callCredentials") def call_credentials(self) -> Optional[pulumi.Input['CallCredentialsArgs']]: """ The call credentials to access the SDS server. """ return pulumi.get(self, "call_credentials") @call_credentials.setter def call_credentials(self, value: Optional[pulumi.Input['CallCredentialsArgs']]): pulumi.set(self, "call_credentials", value) @property @pulumi.getter(name="channelCredentials") def channel_credentials(self) -> Optional[pulumi.Input['ChannelCredentialsArgs']]: """ The channel credentials to access the SDS server. """ return pulumi.get(self, "channel_credentials") @channel_credentials.setter def channel_credentials(self, value: Optional[pulumi.Input['ChannelCredentialsArgs']]): pulumi.set(self, "channel_credentials", value) @property @pulumi.getter(name="targetUri") def target_uri(self) -> Optional[pulumi.Input[str]]: """ The target URI of the SDS server. """ return pulumi.get(self, "target_uri") @target_uri.setter def target_uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "target_uri", value) @pulumi.input_type class GuestOsFeatureArgs: def __init__(__self__, *, type: Optional[pulumi.Input['GuestOsFeatureType']] = None): """ Guest OS features. :param pulumi.Input['GuestOsFeatureType'] type: The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - SECURE_BOOT - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE For more information, see Enabling guest operating system features. """ if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['GuestOsFeatureType']]: """ The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - SECURE_BOOT - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE For more information, see Enabling guest operating system features. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['GuestOsFeatureType']]): pulumi.set(self, "type", value) @pulumi.input_type class HTTP2HealthCheckArgs: def __init__(__self__, *, host: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['HTTP2HealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['HTTP2HealthCheckProxyHeader']] = None, request_path: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None, weight_report_mode: Optional[pulumi.Input['HTTP2HealthCheckWeightReportMode']] = None): """ :param pulumi.Input[str] host: The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. 
        If both port and port_name are defined, port takes precedence.
        :param pulumi.Input['HTTP2HealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP2 health check follows behavior specified in port and portName fields.
        :param pulumi.Input['HTTP2HealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        :param pulumi.Input[str] request_path: The request path of the HTTP/2 health check request. The default value is /.
        :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.
        :param pulumi.Input['HTTP2HealthCheckWeightReportMode'] weight_report_mode: Weight report mode. Used for weighted Load Balancing.
        """
        if host is not None:
            pulumi.set(__self__, "host", host)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if port_name is not None:
            pulumi.set(__self__, "port_name", port_name)
        if port_specification is not None:
            pulumi.set(__self__, "port_specification", port_specification)
        if proxy_header is not None:
            pulumi.set(__self__, "proxy_header", proxy_header)
        if request_path is not None:
            pulumi.set(__self__, "request_path", request_path)
        if response is not None:
            pulumi.set(__self__, "response", response)
        if weight_report_mode is not None:
            pulumi.set(__self__, "weight_report_mode", weight_report_mode)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.
        """
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="portName")
    def port_name(self) -> Optional[pulumi.Input[str]]:
        """
        Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
        """
        return pulumi.get(self, "port_name")

    @port_name.setter
    def port_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port_name", value)

    @property
    @pulumi.getter(name="portSpecification")
    def port_specification(self) -> Optional[pulumi.Input['HTTP2HealthCheckPortSpecification']]:
        """
        Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking.
        For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP2 health check follows behavior specified in port and portName fields.
        """
        return pulumi.get(self, "port_specification")

    @port_specification.setter
    def port_specification(self, value: Optional[pulumi.Input['HTTP2HealthCheckPortSpecification']]):
        pulumi.set(self, "port_specification", value)

    @property
    @pulumi.getter(name="proxyHeader")
    def proxy_header(self) -> Optional[pulumi.Input['HTTP2HealthCheckProxyHeader']]:
        """
        Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        """
        return pulumi.get(self, "proxy_header")

    @proxy_header.setter
    def proxy_header(self, value: Optional[pulumi.Input['HTTP2HealthCheckProxyHeader']]):
        pulumi.set(self, "proxy_header", value)

    @property
    @pulumi.getter(name="requestPath")
    def request_path(self) -> Optional[pulumi.Input[str]]:
        """
        The request path of the HTTP/2 health check request. The default value is /.
        """
        return pulumi.get(self, "request_path")

    @request_path.setter
    def request_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_path", value)

    @property
    @pulumi.getter
    def response(self) -> Optional[pulumi.Input[str]]:
        """
        The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.
        """
        return pulumi.get(self, "response")

    @response.setter
    def response(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "response", value)

    @property
    @pulumi.getter(name="weightReportMode")
    def weight_report_mode(self) -> Optional[pulumi.Input['HTTP2HealthCheckWeightReportMode']]:
        """
        Weight report mode. Used for weighted Load Balancing.
        """
        return pulumi.get(self, "weight_report_mode")

    @weight_report_mode.setter
    def weight_report_mode(self, value: Optional[pulumi.Input['HTTP2HealthCheckWeightReportMode']]):
        pulumi.set(self, "weight_report_mode", value)


@pulumi.input_type
class HTTPHealthCheckArgs:
    def __init__(__self__, *,
                 host: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 port_name: Optional[pulumi.Input[str]] = None,
                 port_specification: Optional[pulumi.Input['HTTPHealthCheckPortSpecification']] = None,
                 proxy_header: Optional[pulumi.Input['HTTPHealthCheckProxyHeader']] = None,
                 request_path: Optional[pulumi.Input[str]] = None,
                 response: Optional[pulumi.Input[str]] = None,
                 weight_report_mode: Optional[pulumi.Input['HTTPHealthCheckWeightReportMode']] = None):
        """
        :param pulumi.Input[str] host: The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.
        :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.
        :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
        :param pulumi.Input['HTTPHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking.
        For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP health check follows behavior specified in port and portName fields.
        :param pulumi.Input['HTTPHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        :param pulumi.Input[str] request_path: The request path of the HTTP health check request. The default value is /.
        :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.
        :param pulumi.Input['HTTPHealthCheckWeightReportMode'] weight_report_mode: Weight report mode. Used for weighted Load Balancing.
        """
        if host is not None:
            pulumi.set(__self__, "host", host)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if port_name is not None:
            pulumi.set(__self__, "port_name", port_name)
        if port_specification is not None:
            pulumi.set(__self__, "port_specification", port_specification)
        if proxy_header is not None:
            pulumi.set(__self__, "proxy_header", proxy_header)
        if request_path is not None:
            pulumi.set(__self__, "request_path", request_path)
        if response is not None:
            pulumi.set(__self__, "response", response)
        if weight_report_mode is not None:
            pulumi.set(__self__, "weight_report_mode", weight_report_mode)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.
        """
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="portName")
    def port_name(self) -> Optional[pulumi.Input[str]]:
        """
        Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
        """
        return pulumi.get(self, "port_name")

    @port_name.setter
    def port_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port_name", value)

    @property
    @pulumi.getter(name="portSpecification")
    def port_specification(self) -> Optional[pulumi.Input['HTTPHealthCheckPortSpecification']]:
        """
        Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP health check follows behavior specified in port and portName fields.
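        For example, with port_specification=USE_FIXED_PORT and port=8080,
        every probe targets port 8080 on the backend.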
""" return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['HTTPHealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @property @pulumi.getter(name="proxyHeader") def proxy_header(self) -> Optional[pulumi.Input['HTTPHealthCheckProxyHeader']]: """ Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. """ return pulumi.get(self, "proxy_header") @proxy_header.setter def proxy_header(self, value: Optional[pulumi.Input['HTTPHealthCheckProxyHeader']]): pulumi.set(self, "proxy_header", value) @property @pulumi.getter(name="requestPath") def request_path(self) -> Optional[pulumi.Input[str]]: """ The request path of the HTTP health check request. The default value is /. """ return pulumi.get(self, "request_path") @request_path.setter def request_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request_path", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @property @pulumi.getter(name="weightReportMode") def weight_report_mode(self) -> Optional[pulumi.Input['HTTPHealthCheckWeightReportMode']]: """ Weight report mode. used for weighted Load Balancing. """ return pulumi.get(self, "weight_report_mode") @weight_report_mode.setter def weight_report_mode(self, value: Optional[pulumi.Input['HTTPHealthCheckWeightReportMode']]): pulumi.set(self, "weight_report_mode", value) @pulumi.input_type class HTTPSHealthCheckArgs: def __init__(__self__, *, host: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['HTTPSHealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['HTTPSHealthCheckProxyHeader']] = None, request_path: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None, weight_report_mode: Optional[pulumi.Input['HTTPSHealthCheckWeightReportMode']] = None): """ :param pulumi.Input[str] host: The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['HTTPSHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows behavior specified in port and portName fields. 
        :param pulumi.Input['HTTPSHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        :param pulumi.Input[str] request_path: The request path of the HTTPS health check request. The default value is /.
        :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.
        :param pulumi.Input['HTTPSHealthCheckWeightReportMode'] weight_report_mode: Weight report mode. Used for weighted Load Balancing.
        """
        if host is not None:
            pulumi.set(__self__, "host", host)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if port_name is not None:
            pulumi.set(__self__, "port_name", port_name)
        if port_specification is not None:
            pulumi.set(__self__, "port_specification", port_specification)
        if proxy_header is not None:
            pulumi.set(__self__, "proxy_header", proxy_header)
        if request_path is not None:
            pulumi.set(__self__, "request_path", request_path)
        if response is not None:
            pulumi.set(__self__, "response", response)
        if weight_report_mode is not None:
            pulumi.set(__self__, "weight_report_mode", weight_report_mode)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.
        """
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="portName")
    def port_name(self) -> Optional[pulumi.Input[str]]:
        """
        Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
        """
        return pulumi.get(self, "port_name")

    @port_name.setter
    def port_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port_name", value)

    @property
    @pulumi.getter(name="portSpecification")
    def port_specification(self) -> Optional[pulumi.Input['HTTPSHealthCheckPortSpecification']]:
        """
        Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows behavior specified in port and portName fields.
        """
        return pulumi.get(self, "port_specification")

    @port_specification.setter
    def port_specification(self, value: Optional[pulumi.Input['HTTPSHealthCheckPortSpecification']]):
        pulumi.set(self, "port_specification", value)

    @property
    @pulumi.getter(name="proxyHeader")
    def proxy_header(self) -> Optional[pulumi.Input['HTTPSHealthCheckProxyHeader']]:
        """
        Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
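        For example, choose PROXY_V1 when the backend expects the PROXY
        protocol version 1 header to be prepended to the health check data.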
""" return pulumi.get(self, "proxy_header") @proxy_header.setter def proxy_header(self, value: Optional[pulumi.Input['HTTPSHealthCheckProxyHeader']]): pulumi.set(self, "proxy_header", value) @property @pulumi.getter(name="requestPath") def request_path(self) -> Optional[pulumi.Input[str]]: """ The request path of the HTTPS health check request. The default value is /. """ return pulumi.get(self, "request_path") @request_path.setter def request_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request_path", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @property @pulumi.getter(name="weightReportMode") def weight_report_mode(self) -> Optional[pulumi.Input['HTTPSHealthCheckWeightReportMode']]: """ Weight report mode. used for weighted Load Balancing. """ return pulumi.get(self, "weight_report_mode") @weight_report_mode.setter def weight_report_mode(self, value: Optional[pulumi.Input['HTTPSHealthCheckWeightReportMode']]): pulumi.set(self, "weight_report_mode", value) @pulumi.input_type class HealthCheckLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None): """ Configuration of logging on a health check. If logging is enabled, logs will be exported to Stackdriver. :param pulumi.Input[bool] enable: Indicates whether or not to export logs. This is false by default, which means no health check logging will be done. """ if enable is not None: pulumi.set(__self__, "enable", enable) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether or not to export logs. This is false by default, which means no health check logging will be done. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @pulumi.input_type class HostRuleArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, path_matcher: Optional[pulumi.Input[str]] = None): """ UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] hosts: The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[str] path_matcher: The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion. 
""" if description is not None: pulumi.set(__self__, "description", description) if hosts is not None: pulumi.set(__self__, "hosts", hosts) if path_matcher is not None: pulumi.set(__self__, "path_matcher", path_matcher) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def hosts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. """ return pulumi.get(self, "hosts") @hosts.setter def hosts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "hosts", value) @property @pulumi.getter(name="pathMatcher") def path_matcher(self) -> Optional[pulumi.Input[str]]: """ The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion. """ return pulumi.get(self, "path_matcher") @path_matcher.setter def path_matcher(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path_matcher", value) @pulumi.input_type class HttpFaultAbortArgs: def __init__(__self__, *, http_status: Optional[pulumi.Input[int]] = None, percentage: Optional[pulumi.Input[float]] = None): """ Specification for how requests are aborted as part of fault injection. :param pulumi.Input[int] http_status: The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director. :param pulumi.Input[float] percentage: The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. The value must be from 0.0 to 100.0 inclusive. """ if http_status is not None: pulumi.set(__self__, "http_status", http_status) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter(name="httpStatus") def http_status(self) -> Optional[pulumi.Input[int]]: """ The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director. """ return pulumi.get(self, "http_status") @http_status.setter def http_status(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "http_status", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[float]]: """ The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. The value must be from 0.0 to 100.0 inclusive. 
""" return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "percentage", value) @pulumi.input_type class HttpFaultDelayArgs: def __init__(__self__, *, fixed_delay: Optional[pulumi.Input['DurationArgs']] = None, percentage: Optional[pulumi.Input[float]] = None): """ Specifies the delay introduced by the load balancer before forwarding the request to the backend service as part of fault injection. :param pulumi.Input['DurationArgs'] fixed_delay: Specifies the value of the fixed delay interval. :param pulumi.Input[float] percentage: The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive. """ if fixed_delay is not None: pulumi.set(__self__, "fixed_delay", fixed_delay) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter(name="fixedDelay") def fixed_delay(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the value of the fixed delay interval. """ return pulumi.get(self, "fixed_delay") @fixed_delay.setter def fixed_delay(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "fixed_delay", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[float]]: """ The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive. """ return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "percentage", value) @pulumi.input_type class HttpFaultInjectionArgs: def __init__(__self__, *, abort: Optional[pulumi.Input['HttpFaultAbortArgs']] = None, delay: Optional[pulumi.Input['HttpFaultDelayArgs']] = None): """ The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by the load balancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. :param pulumi.Input['HttpFaultAbortArgs'] abort: The specification for how client requests are aborted as part of fault injection. :param pulumi.Input['HttpFaultDelayArgs'] delay: The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. """ if abort is not None: pulumi.set(__self__, "abort", abort) if delay is not None: pulumi.set(__self__, "delay", delay) @property @pulumi.getter def abort(self) -> Optional[pulumi.Input['HttpFaultAbortArgs']]: """ The specification for how client requests are aborted as part of fault injection. """ return pulumi.get(self, "abort") @abort.setter def abort(self, value: Optional[pulumi.Input['HttpFaultAbortArgs']]): pulumi.set(self, "abort", value) @property @pulumi.getter def delay(self) -> Optional[pulumi.Input['HttpFaultDelayArgs']]: """ The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. 
""" return pulumi.get(self, "delay") @delay.setter def delay(self, value: Optional[pulumi.Input['HttpFaultDelayArgs']]): pulumi.set(self, "delay", value) @pulumi.input_type class HttpFilterConfigArgs: def __init__(__self__, *, config: Optional[pulumi.Input[str]] = None, config_type_url: Optional[pulumi.Input[str]] = None, filter_name: Optional[pulumi.Input[str]] = None): """ HttpFilterConfiguration supplies additional contextual settings for networkservices.HttpFilter resources enabled by Traffic Director. :param pulumi.Input[str] config: The configuration needed to enable the networkservices.HttpFilter resource. The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl :param pulumi.Input[str] config_type_url: The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, for example: type.googleapis.com/google.protobuf.Struct :param pulumi.Input[str] filter_name: Name of the networkservices.HttpFilter resource this configuration belongs to. This name must be known to the xDS client. Example: envoy.wasm """ if config is not None: pulumi.set(__self__, "config", config) if config_type_url is not None: pulumi.set(__self__, "config_type_url", config_type_url) if filter_name is not None: pulumi.set(__self__, "filter_name", filter_name) @property @pulumi.getter def config(self) -> Optional[pulumi.Input[str]]: """ The configuration needed to enable the networkservices.HttpFilter resource. The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl """ return pulumi.get(self, "config") @config.setter def config(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "config", value) @property @pulumi.getter(name="configTypeUrl") def config_type_url(self) -> Optional[pulumi.Input[str]]: """ The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, for example: type.googleapis.com/google.protobuf.Struct """ return pulumi.get(self, "config_type_url") @config_type_url.setter def config_type_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "config_type_url", value) @property @pulumi.getter(name="filterName") def filter_name(self) -> Optional[pulumi.Input[str]]: """ Name of the networkservices.HttpFilter resource this configuration belongs to. This name must be known to the xDS client. Example: envoy.wasm """ return pulumi.get(self, "filter_name") @filter_name.setter def filter_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "filter_name", value) @pulumi.input_type class HttpHeaderActionArgs: def __init__(__self__, *, request_headers_to_add: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]] = None, request_headers_to_remove: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, response_headers_to_add: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]] = None, response_headers_to_remove: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The request and response header transformations that take effect before the request is passed along to the selected backendService. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]] request_headers_to_add: Headers to add to a matching request before forwarding the request to the backendService. 
:param pulumi.Input[Sequence[pulumi.Input[str]]] request_headers_to_remove: A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]] response_headers_to_add: Headers to add to the response before sending the response back to the client. :param pulumi.Input[Sequence[pulumi.Input[str]]] response_headers_to_remove: A list of header names for headers that need to be removed from the response before sending the response back to the client. """ if request_headers_to_add is not None: pulumi.set(__self__, "request_headers_to_add", request_headers_to_add) if request_headers_to_remove is not None: pulumi.set(__self__, "request_headers_to_remove", request_headers_to_remove) if response_headers_to_add is not None: pulumi.set(__self__, "response_headers_to_add", response_headers_to_add) if response_headers_to_remove is not None: pulumi.set(__self__, "response_headers_to_remove", response_headers_to_remove) @property @pulumi.getter(name="requestHeadersToAdd") def request_headers_to_add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]: """ Headers to add to a matching request before forwarding the request to the backendService. """ return pulumi.get(self, "request_headers_to_add") @request_headers_to_add.setter def request_headers_to_add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]): pulumi.set(self, "request_headers_to_add", value) @property @pulumi.getter(name="requestHeadersToRemove") def request_headers_to_remove(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. """ return pulumi.get(self, "request_headers_to_remove") @request_headers_to_remove.setter def request_headers_to_remove(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "request_headers_to_remove", value) @property @pulumi.getter(name="responseHeadersToAdd") def response_headers_to_add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]: """ Headers to add to the response before sending the response back to the client. """ return pulumi.get(self, "response_headers_to_add") @response_headers_to_add.setter def response_headers_to_add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]): pulumi.set(self, "response_headers_to_add", value) @property @pulumi.getter(name="responseHeadersToRemove") def response_headers_to_remove(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of header names for headers that need to be removed from the response before sending the response back to the client.
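Example (an illustrative sketch; the header names and values are hypothetical):

    # Strip an internal request header and stamp every response with a static header.
    header_action = HttpHeaderActionArgs(
        request_headers_to_remove=['X-Internal-Debug'],
        response_headers_to_add=[HttpHeaderOptionArgs(header_name='X-Served-By', header_value='edge', replace=True)])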
""" return pulumi.get(self, "response_headers_to_remove") @response_headers_to_remove.setter def response_headers_to_remove(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "response_headers_to_remove", value) @pulumi.input_type class HttpHeaderMatchArgs: def __init__(__self__, *, exact_match: Optional[pulumi.Input[str]] = None, header_name: Optional[pulumi.Input[str]] = None, invert_match: Optional[pulumi.Input[bool]] = None, prefix_match: Optional[pulumi.Input[str]] = None, present_match: Optional[pulumi.Input[bool]] = None, range_match: Optional[pulumi.Input['Int64RangeMatchArgs']] = None, regex_match: Optional[pulumi.Input[str]] = None, suffix_match: Optional[pulumi.Input[str]] = None): """ matchRule criteria for request header matches. :param pulumi.Input[str] exact_match: The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input[str] header_name: The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`. :param pulumi.Input[bool] invert_match: If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. :param pulumi.Input[str] prefix_match: The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input[bool] present_match: A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input['Int64RangeMatchArgs'] range_match: The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL. :param pulumi.Input[str] regex_match: The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. 
regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input[str] suffix_match: The value of the header must end with the contents of suffixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ if exact_match is not None: pulumi.set(__self__, "exact_match", exact_match) if header_name is not None: pulumi.set(__self__, "header_name", header_name) if invert_match is not None: pulumi.set(__self__, "invert_match", invert_match) if prefix_match is not None: pulumi.set(__self__, "prefix_match", prefix_match) if present_match is not None: pulumi.set(__self__, "present_match", present_match) if range_match is not None: pulumi.set(__self__, "range_match", range_match) if regex_match is not None: pulumi.set(__self__, "regex_match", regex_match) if suffix_match is not None: pulumi.set(__self__, "suffix_match", suffix_match) @property @pulumi.getter(name="exactMatch") def exact_match(self) -> Optional[pulumi.Input[str]]: """ The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "exact_match") @exact_match.setter def exact_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "exact_match", value) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`. """ return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @property @pulumi.getter(name="invertMatch") def invert_match(self) -> Optional[pulumi.Input[bool]]: """ If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. """ return pulumi.get(self, "invert_match") @invert_match.setter def invert_match(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "invert_match", value) @property @pulumi.getter(name="prefixMatch") def prefix_match(self) -> Optional[pulumi.Input[str]]: """ The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "prefix_match") @prefix_match.setter def prefix_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "prefix_match", value) @property @pulumi.getter(name="presentMatch") def present_match(self) -> Optional[pulumi.Input[bool]]: """ A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. 
Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "present_match") @present_match.setter def present_match(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "present_match", value) @property @pulumi.getter(name="rangeMatch") def range_match(self) -> Optional[pulumi.Input['Int64RangeMatchArgs']]: """ The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL. """ return pulumi.get(self, "range_match") @range_match.setter def range_match(self, value: Optional[pulumi.Input['Int64RangeMatchArgs']]): pulumi.set(self, "range_match", value) @property @pulumi.getter(name="regexMatch") def regex_match(self) -> Optional[pulumi.Input[str]]: """ The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "regex_match") @regex_match.setter def regex_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "regex_match", value) @property @pulumi.getter(name="suffixMatch") def suffix_match(self) -> Optional[pulumi.Input[str]]: """ The value of the header must end with the contents of suffixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "suffix_match") @suffix_match.setter def suffix_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "suffix_match", value) @pulumi.input_type class HttpHeaderOptionArgs: def __init__(__self__, *, header_name: Optional[pulumi.Input[str]] = None, header_value: Optional[pulumi.Input[str]] = None, replace: Optional[pulumi.Input[bool]] = None): """ Specification determining how headers are added to requests or responses. :param pulumi.Input[str] header_name: The name of the header. :param pulumi.Input[str] header_value: The value of the header to add. :param pulumi.Input[bool] replace: If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. """ if header_name is not None: pulumi.set(__self__, "header_name", header_name) if header_value is not None: pulumi.set(__self__, "header_value", header_value) if replace is not None: pulumi.set(__self__, "replace", replace) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The name of the header. 
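Example (an illustrative sketch; the header name and value are hypothetical):

    # Replace any existing X-Env values with a single fixed value.
    option = HttpHeaderOptionArgs(header_name='X-Env', header_value='prod', replace=True)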
""" return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @property @pulumi.getter(name="headerValue") def header_value(self) -> Optional[pulumi.Input[str]]: """ The value of the header to add. """ return pulumi.get(self, "header_value") @header_value.setter def header_value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_value", value) @property @pulumi.getter def replace(self) -> Optional[pulumi.Input[bool]]: """ If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. """ return pulumi.get(self, "replace") @replace.setter def replace(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "replace", value) @pulumi.input_type class HttpQueryParameterMatchArgs: def __init__(__self__, *, exact_match: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, present_match: Optional[pulumi.Input[bool]] = None, regex_match: Optional[pulumi.Input[str]] = None): """ HttpRouteRuleMatch criteria for a request's query parameter. :param pulumi.Input[str] exact_match: The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. :param pulumi.Input[str] name: The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails. :param pulumi.Input[bool] present_match: Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. :param pulumi.Input[str] regex_match: The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. """ if exact_match is not None: pulumi.set(__self__, "exact_match", exact_match) if name is not None: pulumi.set(__self__, "name", name) if present_match is not None: pulumi.set(__self__, "present_match", present_match) if regex_match is not None: pulumi.set(__self__, "regex_match", regex_match) @property @pulumi.getter(name="exactMatch") def exact_match(self) -> Optional[pulumi.Input[str]]: """ The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. """ return pulumi.get(self, "exact_match") @exact_match.setter def exact_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "exact_match", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="presentMatch") def present_match(self) -> Optional[pulumi.Input[bool]]: """ Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. """ return pulumi.get(self, "present_match") @present_match.setter def present_match(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "present_match", value) @property @pulumi.getter(name="regexMatch") def regex_match(self) -> Optional[pulumi.Input[str]]: """ The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "regex_match") @regex_match.setter def regex_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "regex_match", value) @pulumi.input_type class HttpRedirectActionArgs: def __init__(__self__, *, host_redirect: Optional[pulumi.Input[str]] = None, https_redirect: Optional[pulumi.Input[bool]] = None, path_redirect: Optional[pulumi.Input[str]] = None, prefix_redirect: Optional[pulumi.Input[str]] = None, redirect_response_code: Optional[pulumi.Input['HttpRedirectActionRedirectResponseCode']] = None, strip_query: Optional[pulumi.Input[bool]] = None): """ Specifies settings for an HTTP redirect. :param pulumi.Input[str] host_redirect: The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters. :param pulumi.Input[bool] https_redirect: If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. The default is set to false. :param pulumi.Input[str] path_redirect: The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. :param pulumi.Input[str] prefix_redirect: The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. :param pulumi.Input['HttpRedirectActionRedirectResponseCode'] redirect_response_code: The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. - PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained. 
:param pulumi.Input[bool] strip_query: If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false. """ if host_redirect is not None: pulumi.set(__self__, "host_redirect", host_redirect) if https_redirect is not None: pulumi.set(__self__, "https_redirect", https_redirect) if path_redirect is not None: pulumi.set(__self__, "path_redirect", path_redirect) if prefix_redirect is not None: pulumi.set(__self__, "prefix_redirect", prefix_redirect) if redirect_response_code is not None: pulumi.set(__self__, "redirect_response_code", redirect_response_code) if strip_query is not None: pulumi.set(__self__, "strip_query", strip_query) @property @pulumi.getter(name="hostRedirect") def host_redirect(self) -> Optional[pulumi.Input[str]]: """ The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters. """ return pulumi.get(self, "host_redirect") @host_redirect.setter def host_redirect(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host_redirect", value) @property @pulumi.getter(name="httpsRedirect") def https_redirect(self) -> Optional[pulumi.Input[bool]]: """ If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. The default is set to false. """ return pulumi.get(self, "https_redirect") @https_redirect.setter def https_redirect(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "https_redirect", value) @property @pulumi.getter(name="pathRedirect") def path_redirect(self) -> Optional[pulumi.Input[str]]: """ The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. """ return pulumi.get(self, "path_redirect") @path_redirect.setter def path_redirect(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path_redirect", value) @property @pulumi.getter(name="prefixRedirect") def prefix_redirect(self) -> Optional[pulumi.Input[str]]: """ The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. """ return pulumi.get(self, "prefix_redirect") @prefix_redirect.setter def prefix_redirect(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "prefix_redirect", value) @property @pulumi.getter(name="redirectResponseCode") def redirect_response_code(self) -> Optional[pulumi.Input['HttpRedirectActionRedirectResponseCode']]: """ The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. 
- PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained. """ return pulumi.get(self, "redirect_response_code") @redirect_response_code.setter def redirect_response_code(self, value: Optional[pulumi.Input['HttpRedirectActionRedirectResponseCode']]): pulumi.set(self, "redirect_response_code", value) @property @pulumi.getter(name="stripQuery") def strip_query(self) -> Optional[pulumi.Input[bool]]: """ If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false. """ return pulumi.get(self, "strip_query") @strip_query.setter def strip_query(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "strip_query", value) @pulumi.input_type class HttpRetryPolicyArgs: def __init__(__self__, *, num_retries: Optional[pulumi.Input[int]] = None, per_try_timeout: Optional[pulumi.Input['DurationArgs']] = None, retry_conditions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The retry policy associated with an HttpRouteRule. :param pulumi.Input[int] num_retries: Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. :param pulumi.Input['DurationArgs'] per_try_timeout: Specifies a non-zero timeout per retry attempt. If not specified, will use the timeout set in the HttpRouteAction field. If timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input[str]]] retry_conditions: Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. - retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true.
- cancelled - deadline-exceeded - internal - resource-exhausted - unavailable """ if num_retries is not None: pulumi.set(__self__, "num_retries", num_retries) if per_try_timeout is not None: pulumi.set(__self__, "per_try_timeout", per_try_timeout) if retry_conditions is not None: pulumi.set(__self__, "retry_conditions", retry_conditions) @property @pulumi.getter(name="numRetries") def num_retries(self) -> Optional[pulumi.Input[int]]: """ Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. """ return pulumi.get(self, "num_retries") @num_retries.setter def num_retries(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "num_retries", value) @property @pulumi.getter(name="perTryTimeout") def per_try_timeout(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies a non-zero timeout per retry attempt. If not specified, will use the timeout set in the HttpRouteAction field. If timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. """ return pulumi.get(self, "per_try_timeout") @per_try_timeout.setter def per_try_timeout(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "per_try_timeout", value) @property @pulumi.getter(name="retryConditions") def retry_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. - retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true.
- cancelled - deadline-exceeded - internal - resource-exhausted - unavailable """ return pulumi.get(self, "retry_conditions") @retry_conditions.setter def retry_conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "retry_conditions", value) @pulumi.input_type class HttpRouteActionArgs: def __init__(__self__, *, cors_policy: Optional[pulumi.Input['CorsPolicyArgs']] = None, fault_injection_policy: Optional[pulumi.Input['HttpFaultInjectionArgs']] = None, max_stream_duration: Optional[pulumi.Input['DurationArgs']] = None, request_mirror_policy: Optional[pulumi.Input['RequestMirrorPolicyArgs']] = None, retry_policy: Optional[pulumi.Input['HttpRetryPolicyArgs']] = None, timeout: Optional[pulumi.Input['DurationArgs']] = None, url_rewrite: Optional[pulumi.Input['UrlRewriteArgs']] = None, weighted_backend_services: Optional[pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]]] = None): """ :param pulumi.Input['CorsPolicyArgs'] cors_policy: The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input['HttpFaultInjectionArgs'] fault_injection_policy: The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly, requests from clients can be aborted by the load balancer for a percentage of requests. For the requests impacted by fault injection, timeout and retry_policy are ignored by clients that are configured with a fault_injection_policy. :param pulumi.Input['DurationArgs'] max_stream_duration: Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route. This field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input['RequestMirrorPolicyArgs'] request_mirror_policy: Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input['HttpRetryPolicyArgs'] retry_policy: Specifies the retry policy associated with this route. :param pulumi.Input['DurationArgs'] timeout: Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries.
If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input['UrlRewriteArgs'] url_rewrite: The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]] weighted_backend_services: A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. """ if cors_policy is not None: pulumi.set(__self__, "cors_policy", cors_policy) if fault_injection_policy is not None: pulumi.set(__self__, "fault_injection_policy", fault_injection_policy) if max_stream_duration is not None: pulumi.set(__self__, "max_stream_duration", max_stream_duration) if request_mirror_policy is not None: pulumi.set(__self__, "request_mirror_policy", request_mirror_policy) if retry_policy is not None: pulumi.set(__self__, "retry_policy", retry_policy) if timeout is not None: pulumi.set(__self__, "timeout", timeout) if url_rewrite is not None: pulumi.set(__self__, "url_rewrite", url_rewrite) if weighted_backend_services is not None: pulumi.set(__self__, "weighted_backend_services", weighted_backend_services) @property @pulumi.getter(name="corsPolicy") def cors_policy(self) -> Optional[pulumi.Input['CorsPolicyArgs']]: """ The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "cors_policy") @cors_policy.setter def cors_policy(self, value: Optional[pulumi.Input['CorsPolicyArgs']]): pulumi.set(self, "cors_policy", value) @property @pulumi.getter(name="faultInjectionPolicy") def fault_injection_policy(self) -> Optional[pulumi.Input['HttpFaultInjectionArgs']]: """ The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly, requests from clients can be aborted by the load balancer for a percentage of requests. For the requests impacted by fault injection, timeout and retry_policy are ignored by clients that are configured with a fault_injection_policy.
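Example (an illustrative sketch wiring together the fault-injection types defined earlier in this module; the values are hypothetical):

    # Abort 0.5% of requests on this route with HTTP 503.
    route_action = HttpRouteActionArgs(
        fault_injection_policy=HttpFaultInjectionArgs(
            abort=HttpFaultAbortArgs(http_status=503, percentage=0.5)))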
""" return pulumi.get(self, "fault_injection_policy") @fault_injection_policy.setter def fault_injection_policy(self, value: Optional[pulumi.Input['HttpFaultInjectionArgs']]): pulumi.set(self, "fault_injection_policy", value) @property @pulumi.getter(name="maxStreamDuration") def max_stream_duration(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route. This field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "max_stream_duration") @max_stream_duration.setter def max_stream_duration(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "max_stream_duration", value) @property @pulumi.getter(name="requestMirrorPolicy") def request_mirror_policy(self) -> Optional[pulumi.Input['RequestMirrorPolicyArgs']]: """ Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. """ return pulumi.get(self, "request_mirror_policy") @request_mirror_policy.setter def request_mirror_policy(self, value: Optional[pulumi.Input['RequestMirrorPolicyArgs']]): pulumi.set(self, "request_mirror_policy", value) @property @pulumi.getter(name="retryPolicy") def retry_policy(self) -> Optional[pulumi.Input['HttpRetryPolicyArgs']]: """ Specifies the retry policy associated with this route. """ return pulumi.get(self, "retry_policy") @retry_policy.setter def retry_policy(self, value: Optional[pulumi.Input['HttpRetryPolicyArgs']]): pulumi.set(self, "retry_policy", value) @property @pulumi.getter def timeout(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries. If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "timeout") @timeout.setter def timeout(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "timeout", value) @property @pulumi.getter(name="urlRewrite") def url_rewrite(self) -> Optional[pulumi.Input['UrlRewriteArgs']]: """ The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. 
""" return pulumi.get(self, "url_rewrite") @url_rewrite.setter def url_rewrite(self, value: Optional[pulumi.Input['UrlRewriteArgs']]): pulumi.set(self, "url_rewrite", value) @property @pulumi.getter(name="weightedBackendServices") def weighted_backend_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]]]: """ A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. """ return pulumi.get(self, "weighted_backend_services") @weighted_backend_services.setter def weighted_backend_services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]]]): pulumi.set(self, "weighted_backend_services", value) @pulumi.input_type class HttpRouteRuleMatchArgs: def __init__(__self__, *, full_path_match: Optional[pulumi.Input[str]] = None, header_matches: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]]] = None, ignore_case: Optional[pulumi.Input[bool]] = None, metadata_filters: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]]] = None, prefix_match: Optional[pulumi.Input[str]] = None, query_parameter_matches: Optional[pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]]] = None, regex_match: Optional[pulumi.Input[str]] = None): """ HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. All specified criteria must be satisfied for a match to occur. :param pulumi.Input[str] full_path_match: For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]] header_matches: Specifies a list of header match criteria, all of which must match corresponding headers in the request. :param pulumi.Input[bool] ignore_case: Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]] metadata_filters: Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to the load balancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. 
metadataFilters specified here is applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[str] prefix_match: For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. :param pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]] query_parameter_matches: Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[str] regex_match: For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ if full_path_match is not None: pulumi.set(__self__, "full_path_match", full_path_match) if header_matches is not None: pulumi.set(__self__, "header_matches", header_matches) if ignore_case is not None: pulumi.set(__self__, "ignore_case", ignore_case) if metadata_filters is not None: pulumi.set(__self__, "metadata_filters", metadata_filters) if prefix_match is not None: pulumi.set(__self__, "prefix_match", prefix_match) if query_parameter_matches is not None: pulumi.set(__self__, "query_parameter_matches", query_parameter_matches) if regex_match is not None: pulumi.set(__self__, "regex_match", regex_match) @property @pulumi.getter(name="fullPathMatch") def full_path_match(self) -> Optional[pulumi.Input[str]]: """ For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. """ return pulumi.get(self, "full_path_match") @full_path_match.setter def full_path_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "full_path_match", value) @property @pulumi.getter(name="headerMatches") def header_matches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]]]: """ Specifies a list of header match criteria, all of which must match corresponding headers in the request. """ return pulumi.get(self, "header_matches") @header_matches.setter def header_matches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]]]): pulumi.set(self, "header_matches", value) @property @pulumi.getter(name="ignoreCase") def ignore_case(self) -> Optional[pulumi.Input[bool]]: """ Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy. 
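Example (an illustrative sketch; the path prefix and header are hypothetical):

    # Match requests under /api/ that also carry a specific header value.
    match_rule = HttpRouteRuleMatchArgs(
        prefix_match='/api/',
        header_matches=[HttpHeaderMatchArgs(header_name='X-Tier', exact_match='beta')])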
""" return pulumi.get(self, "ignore_case") @ignore_case.setter def ignore_case(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ignore_case", value) @property @pulumi.getter(name="metadataFilters") def metadata_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]]]: """ Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to the load balancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here is applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "metadata_filters") @metadata_filters.setter def metadata_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]]]): pulumi.set(self, "metadata_filters", value) @property @pulumi.getter(name="prefixMatch") def prefix_match(self) -> Optional[pulumi.Input[str]]: """ For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. """ return pulumi.get(self, "prefix_match") @prefix_match.setter def prefix_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "prefix_match", value) @property @pulumi.getter(name="queryParameterMatches") def query_parameter_matches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]]]: """ Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "query_parameter_matches") @query_parameter_matches.setter def query_parameter_matches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]]]): pulumi.set(self, "query_parameter_matches", value) @property @pulumi.getter(name="regexMatch") def regex_match(self) -> Optional[pulumi.Input[str]]: """ For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
""" return pulumi.get(self, "regex_match") @regex_match.setter def regex_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "regex_match", value) @pulumi.input_type class HttpRouteRuleArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, header_action: Optional[pulumi.Input['HttpHeaderActionArgs']] = None, http_filter_configs: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]] = None, http_filter_metadata: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]] = None, match_rules: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]]] = None, priority: Optional[pulumi.Input[int]] = None, route_action: Optional[pulumi.Input['HttpRouteActionArgs']] = None, service: Optional[pulumi.Input[str]] = None, url_redirect: Optional[pulumi.Input['HttpRedirectActionArgs']] = None): """ The HttpRouteRule setting specifies how to match an HTTP request and the corresponding routing action that load balancing proxies perform. :param pulumi.Input[str] description: The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backendService. The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]] http_filter_configs: Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. httpFilterConfigs only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]] http_filter_metadata: Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director. httpFilterMetadata only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. The only configTypeUrl supported is type.googleapis.com/google.protobuf.Struct Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]] match_rules: The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule. :param pulumi.Input[int] priority: For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. 
You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules. :param pulumi.Input['HttpRouteActionArgs'] route_action: In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction. :param pulumi.Input[str] service: The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. :param pulumi.Input['HttpRedirectActionArgs'] url_redirect: When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ if description is not None: pulumi.set(__self__, "description", description) if header_action is not None: pulumi.set(__self__, "header_action", header_action) if http_filter_configs is not None: pulumi.set(__self__, "http_filter_configs", http_filter_configs) if http_filter_metadata is not None: pulumi.set(__self__, "http_filter_metadata", http_filter_metadata) if match_rules is not None: pulumi.set(__self__, "match_rules", match_rules) if priority is not None: pulumi.set(__self__, "priority", priority) if route_action is not None: pulumi.set(__self__, "route_action", route_action) if service is not None: pulumi.set(__self__, "service", service) if url_redirect is not None: pulumi.set(__self__, "url_redirect", url_redirect) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="headerAction") def header_action(self) -> Optional[pulumi.Input['HttpHeaderActionArgs']]: """ Specifies changes to request and response headers that need to take effect for the selected backendService. 
The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "header_action") @header_action.setter def header_action(self, value: Optional[pulumi.Input['HttpHeaderActionArgs']]): pulumi.set(self, "header_action", value) @property @pulumi.getter(name="httpFilterConfigs") def http_filter_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]: """ Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. httpFilterConfigs only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "http_filter_configs") @http_filter_configs.setter def http_filter_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]): pulumi.set(self, "http_filter_configs", value) @property @pulumi.getter(name="httpFilterMetadata") def http_filter_metadata(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]: """ Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director. httpFilterMetadata only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. The only configTypeUrl supported is type.googleapis.com/google.protobuf.Struct Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "http_filter_metadata") @http_filter_metadata.setter def http_filter_metadata(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]): pulumi.set(self, "http_filter_metadata", value) @property @pulumi.getter(name="matchRules") def match_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]]]: """ The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule. """ return pulumi.get(self, "match_rules") @match_rules.setter def match_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]]]): pulumi.set(self, "match_rules", value) @property @pulumi.getter def priority(self) -> Optional[pulumi.Input[int]]: """ For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. 
Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules. """ return pulumi.get(self, "priority") @priority.setter def priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "priority", value) @property @pulumi.getter(name="routeAction") def route_action(self) -> Optional[pulumi.Input['HttpRouteActionArgs']]: """ In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction. """ return pulumi.get(self, "route_action") @route_action.setter def route_action(self, value: Optional[pulumi.Input['HttpRouteActionArgs']]): pulumi.set(self, "route_action", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="urlRedirect") def url_redirect(self) -> Optional[pulumi.Input['HttpRedirectActionArgs']]: """ When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "url_redirect") @url_redirect.setter def url_redirect(self, value: Optional[pulumi.Input['HttpRedirectActionArgs']]): pulumi.set(self, "url_redirect", value) @pulumi.input_type class ImageRawDiskArgs: def __init__(__self__, *, container_type: Optional[pulumi.Input['ImageRawDiskContainerType']] = None, source: Optional[pulumi.Input[str]] = None): """ The parameters of the raw disk image. :param pulumi.Input['ImageRawDiskContainerType'] container_type: The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created. :param pulumi.Input[str] source: The full Google Cloud Storage URL where the raw disk image archive is stored. 
The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL """ if container_type is not None: pulumi.set(__self__, "container_type", container_type) if source is not None: pulumi.set(__self__, "source", source) @property @pulumi.getter(name="containerType") def container_type(self) -> Optional[pulumi.Input['ImageRawDiskContainerType']]: """ The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created. """ return pulumi.get(self, "container_type") @container_type.setter def container_type(self, value: Optional[pulumi.Input['ImageRawDiskContainerType']]): pulumi.set(self, "container_type", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input[str]]: """ The full Google Cloud Storage URL where the raw disk image archive is stored. The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source", value) @pulumi.input_type class InitialStateConfigArgs: def __init__(__self__, *, dbs: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]] = None, dbxs: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]] = None, keks: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]] = None, pk: Optional[pulumi.Input['FileContentBufferArgs']] = None): """ Initial State for shielded instance; these are public keys which are safe to store in public. :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] dbs: The Key Database (db). :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] dbxs: The forbidden key database (dbx). :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] keks: The Key Exchange Key (KEK). :param pulumi.Input['FileContentBufferArgs'] pk: The Platform Key (PK). """ if dbs is not None: pulumi.set(__self__, "dbs", dbs) if dbxs is not None: pulumi.set(__self__, "dbxs", dbxs) if keks is not None: pulumi.set(__self__, "keks", keks) if pk is not None: pulumi.set(__self__, "pk", pk) @property @pulumi.getter def dbs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]: """ The Key Database (db). """ return pulumi.get(self, "dbs") @dbs.setter def dbs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]): pulumi.set(self, "dbs", value) @property @pulumi.getter def dbxs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]: """ The forbidden key database (dbx).
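For illustration (a sketch only; it assumes FileContentBufferArgs takes a base64-encoded content string and a file_type, so check that class for the exact fields): dbxs=[FileContentBufferArgs(content='<base64 certificate>', file_type='X509')].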
""" return pulumi.get(self, "dbxs") @dbxs.setter def dbxs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]): pulumi.set(self, "dbxs", value) @property @pulumi.getter def keks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]: """ The Key Exchange Key (KEK). """ return pulumi.get(self, "keks") @keks.setter def keks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]): pulumi.set(self, "keks", value) @property @pulumi.getter def pk(self) -> Optional[pulumi.Input['FileContentBufferArgs']]: """ The Platform Key (PK). """ return pulumi.get(self, "pk") @pk.setter def pk(self, value: Optional[pulumi.Input['FileContentBufferArgs']]): pulumi.set(self, "pk", value) @pulumi.input_type class InstanceGroupManagerAllInstancesConfigArgs: def __init__(__self__, *, properties: Optional[pulumi.Input['InstancePropertiesPatchArgs']] = None): """ :param pulumi.Input['InstancePropertiesPatchArgs'] properties: Properties for instances that are created using this instances config. You can add or modify properties using the instanceGroupManagers.patch or regionInstanceGroupManagers.patch. After setting instances_config, you must update your instances to use it; for example, you can use the applyUpdatesToInstances method. """ if properties is not None: pulumi.set(__self__, "properties", properties) @property @pulumi.getter def properties(self) -> Optional[pulumi.Input['InstancePropertiesPatchArgs']]: """ Properties for instances that are created using this instances config. You can add or modify properties using the instanceGroupManagers.patch or regionInstanceGroupManagers.patch. After setting instances_config, you must update your instances to use it; for example, you can use the applyUpdatesToInstances method. """ return pulumi.get(self, "properties") @properties.setter def properties(self, value: Optional[pulumi.Input['InstancePropertiesPatchArgs']]): pulumi.set(self, "properties", value) @pulumi.input_type class InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs: def __init__(__self__, *, on_health_check: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck']] = None): """ :param pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck'] on_health_check: If you have configured an application-based health check for the group, this field controls whether to trigger VM autohealing based on a failed health check. Valid values are: - ON (default): The group recreates running VMs that fail the application-based health check. - OFF: When set to OFF, you can still observe instance health state, but the group does not recreate VMs that fail the application-based health check. This is useful for troubleshooting and setting up your health check configuration. """ if on_health_check is not None: pulumi.set(__self__, "on_health_check", on_health_check) @property @pulumi.getter(name="onHealthCheck") def on_health_check(self) -> Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck']]: """ If you have configured an application-based health check for the group, this field controls whether to trigger VM autohealing based on a failed health check. Valid values are: - ON (default): The group recreates running VMs that fail the application-based health check. - OFF: When set to OFF, you can still observe instance health state, but the group does not recreate VMs that fail the application-based health check. 
This is useful for troubleshooting and setting up your health check configuration. """ return pulumi.get(self, "on_health_check") @on_health_check.setter def on_health_check(self, value: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck']]): pulumi.set(self, "on_health_check", value) @pulumi.input_type class InstanceGroupManagerAutoHealingPolicyArgs: def __init__(__self__, *, auto_healing_triggers: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs']] = None, health_check: Optional[pulumi.Input[str]] = None, initial_delay_sec: Optional[pulumi.Input[int]] = None, max_unavailable: Optional[pulumi.Input['FixedOrPercentArgs']] = None, update_instances: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyUpdateInstances']] = None): """ :param pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs'] auto_healing_triggers: Restricts what triggers autohealing. :param pulumi.Input[str] health_check: The URL for the health check that signals autohealing. :param pulumi.Input[int] initial_delay_sec: The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600]. :param pulumi.Input['FixedOrPercentArgs'] max_unavailable: Maximum number of instances that can be unavailable when autohealing. When 'percent' is used, the value is rounded if necessary. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's currentAction is NONE (in particular its liveness health check result was observed to be HEALTHY at least once as it passed VERIFYING). 3. There is no outgoing action on an instance triggered by IGM. By default, number of concurrently autohealed instances is smaller than the managed instance group target size. However, if a zonal managed instance group has only one instance, or a regional managed instance group has only one instance per zone, autohealing will recreate these instances when they become unhealthy. """ if auto_healing_triggers is not None: pulumi.set(__self__, "auto_healing_triggers", auto_healing_triggers) if health_check is not None: pulumi.set(__self__, "health_check", health_check) if initial_delay_sec is not None: pulumi.set(__self__, "initial_delay_sec", initial_delay_sec) if max_unavailable is not None: pulumi.set(__self__, "max_unavailable", max_unavailable) if update_instances is not None: pulumi.set(__self__, "update_instances", update_instances) @property @pulumi.getter(name="autoHealingTriggers") def auto_healing_triggers(self) -> Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs']]: """ Restricts what triggers autohealing. """ return pulumi.get(self, "auto_healing_triggers") @auto_healing_triggers.setter def auto_healing_triggers(self, value: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs']]): pulumi.set(self, "auto_healing_triggers", value) @property @pulumi.getter(name="healthCheck") def health_check(self) -> Optional[pulumi.Input[str]]: """ The URL for the health check that signals autohealing. 
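For illustration (project and resource names are placeholders): https://www.googleapis.com/compute/v1/projects/my-project/global/healthChecks/my-health-check.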
""" return pulumi.get(self, "health_check") @health_check.setter def health_check(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "health_check", value) @property @pulumi.getter(name="initialDelaySec") def initial_delay_sec(self) -> Optional[pulumi.Input[int]]: """ The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600]. """ return pulumi.get(self, "initial_delay_sec") @initial_delay_sec.setter def initial_delay_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "initial_delay_sec", value) @property @pulumi.getter(name="maxUnavailable") def max_unavailable(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ Maximum number of instances that can be unavailable when autohealing. When 'percent' is used, the value is rounded if necessary. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's currentAction is NONE (in particular its liveness health check result was observed to be HEALTHY at least once as it passed VERIFYING). 3. There is no outgoing action on an instance triggered by IGM. By default, number of concurrently autohealed instances is smaller than the managed instance group target size. However, if a zonal managed instance group has only one instance, or a regional managed instance group has only one instance per zone, autohealing will recreate these instances when they become unhealthy. """ return pulumi.get(self, "max_unavailable") @max_unavailable.setter def max_unavailable(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_unavailable", value) @property @pulumi.getter(name="updateInstances") def update_instances(self) -> Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyUpdateInstances']]: return pulumi.get(self, "update_instances") @update_instances.setter def update_instances(self, value: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyUpdateInstances']]): pulumi.set(self, "update_instances", value) @pulumi.input_type class InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs: def __init__(__self__, *, timeout_sec: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] timeout_sec: The number of seconds to wait for a readiness signal during initialization before timing out. """ if timeout_sec is not None: pulumi.set(__self__, "timeout_sec", timeout_sec) @property @pulumi.getter(name="timeoutSec") def timeout_sec(self) -> Optional[pulumi.Input[int]]: """ The number of seconds to wait for a readiness signal during initialization before timing out. 
""" return pulumi.get(self, "timeout_sec") @timeout_sec.setter def timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "timeout_sec", value) @pulumi.input_type class InstanceGroupManagerInstanceLifecyclePolicyArgs: def __init__(__self__, *, metadata_based_readiness_signal: Optional[pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs']] = None): """ :param pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs'] metadata_based_readiness_signal: The configuration for metadata based readiness signal sent by the instance during initialization when stopping / suspending an instance. The Instance Group Manager will wait for a signal that indicates successful initialization before stopping / suspending an instance. If a successful readiness signal is not sent before timeout, the corresponding instance will not be stopped / suspended. Instead, an error will be visible in the lastAttempt.errors field of the managed instance in the listmanagedinstances method. If metadataBasedReadinessSignal.timeoutSec is unset, the Instance Group Manager will directly proceed to suspend / stop instances, skipping initialization on them. """ if metadata_based_readiness_signal is not None: pulumi.set(__self__, "metadata_based_readiness_signal", metadata_based_readiness_signal) @property @pulumi.getter(name="metadataBasedReadinessSignal") def metadata_based_readiness_signal(self) -> Optional[pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs']]: """ The configuration for metadata based readiness signal sent by the instance during initialization when stopping / suspending an instance. The Instance Group Manager will wait for a signal that indicates successful initialization before stopping / suspending an instance. If a successful readiness signal is not sent before timeout, the corresponding instance will not be stopped / suspended. Instead, an error will be visible in the lastAttempt.errors field of the managed instance in the listmanagedinstances method. If metadataBasedReadinessSignal.timeoutSec is unset, the Instance Group Manager will directly proceed to suspend / stop instances, skipping initialization on them. 
""" return pulumi.get(self, "metadata_based_readiness_signal") @metadata_based_readiness_signal.setter def metadata_based_readiness_signal(self, value: Optional[pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs']]): pulumi.set(self, "metadata_based_readiness_signal", value) @pulumi.input_type class InstanceGroupManagerStandbyPolicyArgs: def __init__(__self__, *, initial_delay_sec: Optional[pulumi.Input[int]] = None): if initial_delay_sec is not None: pulumi.set(__self__, "initial_delay_sec", initial_delay_sec) @property @pulumi.getter(name="initialDelaySec") def initial_delay_sec(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "initial_delay_sec") @initial_delay_sec.setter def initial_delay_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "initial_delay_sec", value) @pulumi.input_type class InstanceGroupManagerUpdatePolicyArgs: def __init__(__self__, *, instance_redistribution_type: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType']] = None, max_surge: Optional[pulumi.Input['FixedOrPercentArgs']] = None, max_unavailable: Optional[pulumi.Input['FixedOrPercentArgs']] = None, min_ready_sec: Optional[pulumi.Input[int]] = None, minimal_action: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction']] = None, most_disruptive_allowed_action: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction']] = None, replacement_method: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod']] = None, type: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyType']] = None): """ :param pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType'] instance_redistribution_type: The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. :param pulumi.Input['FixedOrPercentArgs'] max_surge: The maximum number of instances that can be created above the specified targetSize during the update process. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge. :param pulumi.Input['FixedOrPercentArgs'] max_unavailable: The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable. 
:param pulumi.Input[int] min_ready_sec: Minimum number of seconds to wait after a newly created instance becomes available. This value must be from range [0, 3600]. :param pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction'] minimal_action: Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. :param pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction'] most_disruptive_allowed_action: Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than the most disruptive allowed action you specify, it will not perform the update at all. :param pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod'] replacement_method: What action should be used to replace instances. See minimal_action.REPLACE. :param pulumi.Input['InstanceGroupManagerUpdatePolicyType'] type: The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). """ if instance_redistribution_type is not None: pulumi.set(__self__, "instance_redistribution_type", instance_redistribution_type) if max_surge is not None: pulumi.set(__self__, "max_surge", max_surge) if max_unavailable is not None: pulumi.set(__self__, "max_unavailable", max_unavailable) if min_ready_sec is not None: pulumi.set(__self__, "min_ready_sec", min_ready_sec) if minimal_action is not None: pulumi.set(__self__, "minimal_action", minimal_action) if most_disruptive_allowed_action is not None: pulumi.set(__self__, "most_disruptive_allowed_action", most_disruptive_allowed_action) if replacement_method is not None: pulumi.set(__self__, "replacement_method", replacement_method) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="instanceRedistributionType") def instance_redistribution_type(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType']]: """ The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. """ return pulumi.get(self, "instance_redistribution_type") @instance_redistribution_type.setter def instance_redistribution_type(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType']]): pulumi.set(self, "instance_redistribution_type", value) @property @pulumi.getter(name="maxSurge") def max_surge(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ The maximum number of instances that can be created above the specified targetSize during the update process.
This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge. """ return pulumi.get(self, "max_surge") @max_surge.setter def max_surge(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_surge", value) @property @pulumi.getter(name="maxUnavailable") def max_unavailable(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable. """ return pulumi.get(self, "max_unavailable") @max_unavailable.setter def max_unavailable(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_unavailable", value) @property @pulumi.getter(name="minReadySec") def min_ready_sec(self) -> Optional[pulumi.Input[int]]: """ Minimum number of seconds to wait after a newly created instance becomes available. This value must be from range [0, 3600]. """ return pulumi.get(self, "min_ready_sec") @min_ready_sec.setter def min_ready_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_ready_sec", value) @property @pulumi.getter(name="minimalAction") def minimal_action(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction']]: """ Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. """ return pulumi.get(self, "minimal_action") @minimal_action.setter def minimal_action(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction']]): pulumi.set(self, "minimal_action", value) @property @pulumi.getter(name="mostDisruptiveAllowedAction") def most_disruptive_allowed_action(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction']]: """ Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions.
If the Updater determines that the minimal update action needed is more disruptive than the most disruptive allowed action you specify, it will not perform the update at all. """ return pulumi.get(self, "most_disruptive_allowed_action") @most_disruptive_allowed_action.setter def most_disruptive_allowed_action(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction']]): pulumi.set(self, "most_disruptive_allowed_action", value) @property @pulumi.getter(name="replacementMethod") def replacement_method(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod']]: """ What action should be used to replace instances. See minimal_action.REPLACE. """ return pulumi.get(self, "replacement_method") @replacement_method.setter def replacement_method(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod']]): pulumi.set(self, "replacement_method", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyType']]: """ The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyType']]): pulumi.set(self, "type", value) @pulumi.input_type class InstanceGroupManagerVersionArgs: def __init__(__self__, *, instance_template: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, target_size: Optional[pulumi.Input['FixedOrPercentArgs']] = None): """ :param pulumi.Input[str] instance_template: The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached. :param pulumi.Input[str] name: Name of the version. Unique among all versions in the scope of this managed instance group. :param pulumi.Input['FixedOrPercentArgs'] target_size: Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: - If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. - If expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize). If there is a remainder, the number is rounded. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information. """ if instance_template is not None: pulumi.set(__self__, "instance_template", instance_template) if name is not None: pulumi.set(__self__, "name", name) if target_size is not None: pulumi.set(__self__, "target_size", target_size) @property @pulumi.getter(name="instanceTemplate") def instance_template(self) -> Optional[pulumi.Input[str]]: """ The URL of the instance template that is specified for this managed instance group.
The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached. """ return pulumi.get(self, "instance_template") @instance_template.setter def instance_template(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "instance_template", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the version. Unique among all versions in the scope of this managed instance group. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="targetSize") def target_size(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: - If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. - if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information. """ return pulumi.get(self, "target_size") @target_size.setter def target_size(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "target_size", value) @pulumi.input_type class InstanceParamsArgs: def __init__(__self__, *, resource_manager_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Additional instance params. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_manager_tags: Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. """ if resource_manager_tags is not None: pulumi.set(__self__, "resource_manager_tags", resource_manager_tags) @property @pulumi.getter(name="resourceManagerTags") def resource_manager_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. """ return pulumi.get(self, "resource_manager_tags") @resource_manager_tags.setter def resource_manager_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "resource_manager_tags", value) @pulumi.input_type class InstancePropertiesPatchArgs: def __init__(__self__, *, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Represents the change that you want to make to the instance properties. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The label key-value pairs that you want to patch onto the instance. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata. """ if labels is not None: pulumi.set(__self__, "labels", labels) if metadata is not None: pulumi.set(__self__, "metadata", metadata) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ The label key-value pairs that you want to patch onto the instance. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "metadata", value) @pulumi.input_type class InstancePropertiesArgs: def __init__(__self__, *, advanced_machine_features: Optional[pulumi.Input['AdvancedMachineFeaturesArgs']] = None, can_ip_forward: Optional[pulumi.Input[bool]] = None, confidential_instance_config: Optional[pulumi.Input['ConfidentialInstanceConfigArgs']] = None, description: Optional[pulumi.Input[str]] = None, disks: Optional[pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]]] = None, display_device: Optional[pulumi.Input['DisplayDeviceArgs']] = None, guest_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]] = None, key_revocation_action_type: Optional[pulumi.Input['InstancePropertiesKeyRevocationActionType']] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, machine_type: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input['MetadataArgs']] = None, min_cpu_platform: Optional[pulumi.Input[str]] = None, network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]]] = None, network_performance_config: Optional[pulumi.Input['NetworkPerformanceConfigArgs']] = None, post_key_revocation_action_type: Optional[pulumi.Input['InstancePropertiesPostKeyRevocationActionType']] = None, private_ipv6_google_access: Optional[pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess']] = None, reservation_affinity: Optional[pulumi.Input['ReservationAffinityArgs']] = None, resource_manager_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, resource_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, scheduling: Optional[pulumi.Input['SchedulingArgs']] = None, secure_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, service_accounts: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]]] = None, shielded_instance_config: Optional[pulumi.Input['ShieldedInstanceConfigArgs']] = None, shielded_vm_config: Optional[pulumi.Input['ShieldedVmConfigArgs']] = None, tags: Optional[pulumi.Input['TagsArgs']] = None): """ :param pulumi.Input['AdvancedMachineFeaturesArgs'] advanced_machine_features: Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet. :param pulumi.Input[bool] can_ip_forward: Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. 
If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. :param pulumi.Input['ConfidentialInstanceConfigArgs'] confidential_instance_config: Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet. :param pulumi.Input[str] description: An optional text description for the instances that are created from these properties. :param pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]] disks: An array of disks that are associated with the instances that are created from these properties. :param pulumi.Input['DisplayDeviceArgs'] display_device: Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer Note that for MachineImage, this is not supported yet. :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: A list of guest accelerator cards' type and count to use for instances created from these properties. :param pulumi.Input['InstancePropertiesKeyRevocationActionType'] key_revocation_action_type: KeyRevocationActionType of the instance. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to instances that are created from these properties. :param pulumi.Input[str] machine_type: The machine type to use for instances that are created from these properties. :param pulumi.Input['MetadataArgs'] metadata: The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information. :param pulumi.Input[str] min_cpu_platform: Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. :param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]] network_interfaces: An array of network access configurations for this interface. :param pulumi.Input['NetworkPerformanceConfigArgs'] network_performance_config: Note that for MachineImage, this is not supported yet. :param pulumi.Input['InstancePropertiesPostKeyRevocationActionType'] post_key_revocation_action_type: PostKeyRevocationActionType of the instance. :param pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess'] private_ipv6_google_access: The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. :param pulumi.Input['ReservationAffinityArgs'] reservation_affinity: Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_manager_tags: Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] resource_policies: Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet. 
:param pulumi.Input['SchedulingArgs'] scheduling: Specifies the scheduling options for the instances that are created from these properties. :param pulumi.Input[Sequence[pulumi.Input[str]]] secure_tags: [Input Only] Secure tags to apply to this instance. Maximum number of secure tags allowed is 50. Note that for MachineImage, this is not supported yet. :param pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]] service_accounts: A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances. :param pulumi.Input['ShieldedInstanceConfigArgs'] shielded_instance_config: Note that for MachineImage, this is not supported yet. :param pulumi.Input['ShieldedVmConfigArgs'] shielded_vm_config: Specifies the Shielded VM options for the instances that are created from these properties. :param pulumi.Input['TagsArgs'] tags: A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035. """ if advanced_machine_features is not None: pulumi.set(__self__, "advanced_machine_features", advanced_machine_features) if can_ip_forward is not None: pulumi.set(__self__, "can_ip_forward", can_ip_forward) if confidential_instance_config is not None: pulumi.set(__self__, "confidential_instance_config", confidential_instance_config) if description is not None: pulumi.set(__self__, "description", description) if disks is not None: pulumi.set(__self__, "disks", disks) if display_device is not None: pulumi.set(__self__, "display_device", display_device) if guest_accelerators is not None: pulumi.set(__self__, "guest_accelerators", guest_accelerators) if key_revocation_action_type is not None: pulumi.set(__self__, "key_revocation_action_type", key_revocation_action_type) if labels is not None: pulumi.set(__self__, "labels", labels) if machine_type is not None: pulumi.set(__self__, "machine_type", machine_type) if metadata is not None: pulumi.set(__self__, "metadata", metadata) if min_cpu_platform is not None: pulumi.set(__self__, "min_cpu_platform", min_cpu_platform) if network_interfaces is not None: pulumi.set(__self__, "network_interfaces", network_interfaces) if network_performance_config is not None: pulumi.set(__self__, "network_performance_config", network_performance_config) if post_key_revocation_action_type is not None: pulumi.set(__self__, "post_key_revocation_action_type", post_key_revocation_action_type) if private_ipv6_google_access is not None: pulumi.set(__self__, "private_ipv6_google_access", private_ipv6_google_access) if reservation_affinity is not None: pulumi.set(__self__, "reservation_affinity", reservation_affinity) if resource_manager_tags is not None: pulumi.set(__self__, "resource_manager_tags", resource_manager_tags) if resource_policies is not None: pulumi.set(__self__, "resource_policies", resource_policies) if scheduling is not None: pulumi.set(__self__, "scheduling", scheduling) if secure_tags is not None: pulumi.set(__self__, "secure_tags", secure_tags) if service_accounts is not None: pulumi.set(__self__, "service_accounts", service_accounts) if shielded_instance_config is not None: pulumi.set(__self__, "shielded_instance_config", shielded_instance_config) if shielded_vm_config is not None: pulumi.set(__self__, "shielded_vm_config", 
shielded_vm_config) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="advancedMachineFeatures") def advanced_machine_features(self) -> Optional[pulumi.Input['AdvancedMachineFeaturesArgs']]: """ Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "advanced_machine_features") @advanced_machine_features.setter def advanced_machine_features(self, value: Optional[pulumi.Input['AdvancedMachineFeaturesArgs']]): pulumi.set(self, "advanced_machine_features", value) @property @pulumi.getter(name="canIpForward") def can_ip_forward(self) -> Optional[pulumi.Input[bool]]: """ Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. """ return pulumi.get(self, "can_ip_forward") @can_ip_forward.setter def can_ip_forward(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "can_ip_forward", value) @property @pulumi.getter(name="confidentialInstanceConfig") def confidential_instance_config(self) -> Optional[pulumi.Input['ConfidentialInstanceConfigArgs']]: """ Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "confidential_instance_config") @confidential_instance_config.setter def confidential_instance_config(self, value: Optional[pulumi.Input['ConfidentialInstanceConfigArgs']]): pulumi.set(self, "confidential_instance_config", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional text description for the instances that are created from these properties. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]]]: """ An array of disks that are associated with the instances that are created from these properties. """ return pulumi.get(self, "disks") @disks.setter def disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]]]): pulumi.set(self, "disks", value) @property @pulumi.getter(name="displayDevice") def display_device(self) -> Optional[pulumi.Input['DisplayDeviceArgs']]: """ Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "display_device") @display_device.setter def display_device(self, value: Optional[pulumi.Input['DisplayDeviceArgs']]): pulumi.set(self, "display_device", value) @property @pulumi.getter(name="guestAccelerators") def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]: """ A list of guest accelerator cards' type and count to use for instances created from these properties. 
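For illustration (the accelerator type string is a placeholder; see AcceleratorConfigArgs for the exact fields): guest_accelerators=[AcceleratorConfigArgs(accelerator_count=1, accelerator_type='nvidia-tesla-t4')].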
""" return pulumi.get(self, "guest_accelerators") @guest_accelerators.setter def guest_accelerators(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]): pulumi.set(self, "guest_accelerators", value) @property @pulumi.getter(name="keyRevocationActionType") def key_revocation_action_type(self) -> Optional[pulumi.Input['InstancePropertiesKeyRevocationActionType']]: """ KeyRevocationActionType of the instance. """ return pulumi.get(self, "key_revocation_action_type") @key_revocation_action_type.setter def key_revocation_action_type(self, value: Optional[pulumi.Input['InstancePropertiesKeyRevocationActionType']]): pulumi.set(self, "key_revocation_action_type", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels to apply to instances that are created from these properties. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter(name="machineType") def machine_type(self) -> Optional[pulumi.Input[str]]: """ The machine type to use for instances that are created from these properties. """ return pulumi.get(self, "machine_type") @machine_type.setter def machine_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "machine_type", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['MetadataArgs']]: """ The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['MetadataArgs']]): pulumi.set(self, "metadata", value) @property @pulumi.getter(name="minCpuPlatform") def min_cpu_platform(self) -> Optional[pulumi.Input[str]]: """ Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. """ return pulumi.get(self, "min_cpu_platform") @min_cpu_platform.setter def min_cpu_platform(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "min_cpu_platform", value) @property @pulumi.getter(name="networkInterfaces") def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]]]: """ An array of network access configurations for this interface. """ return pulumi.get(self, "network_interfaces") @network_interfaces.setter def network_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]]]): pulumi.set(self, "network_interfaces", value) @property @pulumi.getter(name="networkPerformanceConfig") def network_performance_config(self) -> Optional[pulumi.Input['NetworkPerformanceConfigArgs']]: """ Note that for MachineImage, this is not supported yet. 
""" return pulumi.get(self, "network_performance_config") @network_performance_config.setter def network_performance_config(self, value: Optional[pulumi.Input['NetworkPerformanceConfigArgs']]): pulumi.set(self, "network_performance_config", value) @property @pulumi.getter(name="postKeyRevocationActionType") def post_key_revocation_action_type(self) -> Optional[pulumi.Input['InstancePropertiesPostKeyRevocationActionType']]: """ PostKeyRevocationActionType of the instance. """ return pulumi.get(self, "post_key_revocation_action_type") @post_key_revocation_action_type.setter def post_key_revocation_action_type(self, value: Optional[pulumi.Input['InstancePropertiesPostKeyRevocationActionType']]): pulumi.set(self, "post_key_revocation_action_type", value) @property @pulumi.getter(name="privateIpv6GoogleAccess") def private_ipv6_google_access(self) -> Optional[pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess']]: """ The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "private_ipv6_google_access") @private_ipv6_google_access.setter def private_ipv6_google_access(self, value: Optional[pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess']]): pulumi.set(self, "private_ipv6_google_access", value) @property @pulumi.getter(name="reservationAffinity") def reservation_affinity(self) -> Optional[pulumi.Input['ReservationAffinityArgs']]: """ Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "reservation_affinity") @reservation_affinity.setter def reservation_affinity(self, value: Optional[pulumi.Input['ReservationAffinityArgs']]): pulumi.set(self, "reservation_affinity", value) @property @pulumi.getter(name="resourceManagerTags") def resource_manager_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. """ return pulumi.get(self, "resource_manager_tags") @resource_manager_tags.setter def resource_manager_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "resource_manager_tags", value) @property @pulumi.getter(name="resourcePolicies") def resource_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "resource_policies") @resource_policies.setter def resource_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "resource_policies", value) @property @pulumi.getter def scheduling(self) -> Optional[pulumi.Input['SchedulingArgs']]: """ Specifies the scheduling options for the instances that are created from these properties. """ return pulumi.get(self, "scheduling") @scheduling.setter def scheduling(self, value: Optional[pulumi.Input['SchedulingArgs']]): pulumi.set(self, "scheduling", value) @property @pulumi.getter(name="secureTags") def secure_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ [Input Only] Secure tags to apply to this instance. Maximum number of secure tags allowed is 50. 
Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "secure_tags") @secure_tags.setter def secure_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "secure_tags", value) @property @pulumi.getter(name="serviceAccounts") def service_accounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]]]: """ A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances. """ return pulumi.get(self, "service_accounts") @service_accounts.setter def service_accounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]]]): pulumi.set(self, "service_accounts", value) @property @pulumi.getter(name="shieldedInstanceConfig") def shielded_instance_config(self) -> Optional[pulumi.Input['ShieldedInstanceConfigArgs']]: """ Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "shielded_instance_config") @shielded_instance_config.setter def shielded_instance_config(self, value: Optional[pulumi.Input['ShieldedInstanceConfigArgs']]): pulumi.set(self, "shielded_instance_config", value) @property @pulumi.getter(name="shieldedVmConfig") def shielded_vm_config(self) -> Optional[pulumi.Input['ShieldedVmConfigArgs']]: """ Specifies the Shielded VM options for the instances that are created from these properties. """ return pulumi.get(self, "shielded_vm_config") @shielded_vm_config.setter def shielded_vm_config(self, value: Optional[pulumi.Input['ShieldedVmConfigArgs']]): pulumi.set(self, "shielded_vm_config", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input['TagsArgs']]: """ A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input['TagsArgs']]): pulumi.set(self, "tags", value) @pulumi.input_type class Int64RangeMatchArgs: def __init__(__self__, *, range_end: Optional[pulumi.Input[str]] = None, range_start: Optional[pulumi.Input[str]] = None): """ HttpRouteRuleMatch criteria for field values that must stay within the specified integer range. :param pulumi.Input[str] range_end: The end of the range (exclusive) in signed long integer format. :param pulumi.Input[str] range_start: The start of the range (inclusive) in signed long integer format. """ if range_end is not None: pulumi.set(__self__, "range_end", range_end) if range_start is not None: pulumi.set(__self__, "range_start", range_start) @property @pulumi.getter(name="rangeEnd") def range_end(self) -> Optional[pulumi.Input[str]]: """ The end of the range (exclusive) in signed long integer format. """ return pulumi.get(self, "range_end") @range_end.setter def range_end(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range_end", value) @property @pulumi.getter(name="rangeStart") def range_start(self) -> Optional[pulumi.Input[str]]: """ The start of the range (inclusive) in signed long integer format. 
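        Example (an illustrative sketch, not generated documentation): matching port values 1024 through 2047. The start is inclusive, the end exclusive, and both are passed as strings in signed long integer format:

            port_range = Int64RangeMatchArgs(range_start="1024", range_end="2048")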
""" return pulumi.get(self, "range_start") @range_start.setter def range_start(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range_start", value) @pulumi.input_type class InterconnectAttachmentPartnerMetadataArgs: def __init__(__self__, *, interconnect_name: Optional[pulumi.Input[str]] = None, partner_name: Optional[pulumi.Input[str]] = None, portal_url: Optional[pulumi.Input[str]] = None): """ Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments. :param pulumi.Input[str] interconnect_name: Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance "Chicago 1". This value may be validated to match approved Partner values. :param pulumi.Input[str] partner_name: Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values. :param pulumi.Input[str] portal_url: URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values. """ if interconnect_name is not None: pulumi.set(__self__, "interconnect_name", interconnect_name) if partner_name is not None: pulumi.set(__self__, "partner_name", partner_name) if portal_url is not None: pulumi.set(__self__, "portal_url", portal_url) @property @pulumi.getter(name="interconnectName") def interconnect_name(self) -> Optional[pulumi.Input[str]]: """ Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance "Chicago 1". This value may be validated to match approved Partner values. """ return pulumi.get(self, "interconnect_name") @interconnect_name.setter def interconnect_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "interconnect_name", value) @property @pulumi.getter(name="partnerName") def partner_name(self) -> Optional[pulumi.Input[str]]: """ Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values. """ return pulumi.get(self, "partner_name") @partner_name.setter def partner_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "partner_name", value) @property @pulumi.getter(name="portalUrl") def portal_url(self) -> Optional[pulumi.Input[str]]: """ URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values. """ return pulumi.get(self, "portal_url") @portal_url.setter def portal_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "portal_url", value) @pulumi.input_type class InterconnectMacsecPreSharedKeyArgs: def __init__(__self__, *, name: pulumi.Input[str], start_time: Optional[pulumi.Input[str]] = None): """ Describes a pre-shared key used to setup MACsec in static connectivity association key (CAK) mode. :param pulumi.Input[str] name: A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
                 :param pulumi.Input[str] start_time: An RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must be in increasing order. The start times of two consecutive keys must be at least 6 hours apart.
        """
        pulumi.set(__self__, "name", name)
        if start_time is not None:
            pulumi.set(__self__, "start_time", start_time)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[pulumi.Input[str]]:
        """
        An RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must be in increasing order. The start times of two consecutive keys must be at least 6 hours apart.
        """
        return pulumi.get(self, "start_time")

    @start_time.setter
    def start_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_time", value)


@pulumi.input_type
class InterconnectMacsecArgs:
    def __init__(__self__, *,
                 pre_shared_keys: pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]],
                 fail_open: Optional[pulumi.Input[bool]] = None):
        """
        Configuration information for enabling Media Access Control security (MACsec) on this Interconnect between Google and your on-premises router.
        :param pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]] pre_shared_keys: A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK will be generated for each key in the key chain. The Google router will automatically pick the key with the most recent startTime when establishing or re-establishing a MACsec secure link.
        :param pulumi.Input[bool] fail_open: If set to true, the Interconnect will be configured with a should-secure MACsec security policy that allows the Google router to fall back to cleartext traffic if the MKA session cannot be established. By default, the Interconnect will be configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router.
        """
        pulumi.set(__self__, "pre_shared_keys", pre_shared_keys)
        if fail_open is not None:
            pulumi.set(__self__, "fail_open", fail_open)

    @property
    @pulumi.getter(name="preSharedKeys")
    def pre_shared_keys(self) -> pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]]:
        """
        A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK will be generated for each key in the key chain. The Google router will automatically pick the key with the most recent startTime when establishing or re-establishing a MACsec secure link.
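        Example (an illustrative sketch, not generated documentation): a two-key keychain. Because the keychain has multiple keys, every key carries a startTime; the start times are increasing and more than 6 hours apart, as required. The timestamps are placeholders:

            macsec = InterconnectMacsecArgs(
                pre_shared_keys=[
                    InterconnectMacsecPreSharedKeyArgs(name="key-1", start_time="2023-01-01T00:00:00Z"),
                    InterconnectMacsecPreSharedKeyArgs(name="key-2", start_time="2023-01-01T12:00:00Z"),
                ],
                fail_open=False,  # must-secure: drop traffic if the MKA session cannot be established
            )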
""" return pulumi.get(self, "pre_shared_keys") @pre_shared_keys.setter def pre_shared_keys(self, value: pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]]): pulumi.set(self, "pre_shared_keys", value) @property @pulumi.getter(name="failOpen") def fail_open(self) -> Optional[pulumi.Input[bool]]: """ If set to true, the Interconnect will be configured with a should-secure MACsec security policy, that allows the Google router to fallback to cleartext traffic if the MKA session cannot be established. By default, the Interconnect will be configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router. """ return pulumi.get(self, "fail_open") @fail_open.setter def fail_open(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "fail_open", value) @pulumi.input_type class LicenseResourceCommitmentArgs: def __init__(__self__, *, amount: Optional[pulumi.Input[str]] = None, cores_per_license: Optional[pulumi.Input[str]] = None, license: Optional[pulumi.Input[str]] = None): """ Commitment for a particular license resource. :param pulumi.Input[str] amount: The number of licenses purchased. :param pulumi.Input[str] cores_per_license: Specifies the core range of the instance for which this license applies. :param pulumi.Input[str] license: Any applicable license URI. """ if amount is not None: pulumi.set(__self__, "amount", amount) if cores_per_license is not None: pulumi.set(__self__, "cores_per_license", cores_per_license) if license is not None: pulumi.set(__self__, "license", license) @property @pulumi.getter def amount(self) -> Optional[pulumi.Input[str]]: """ The number of licenses purchased. """ return pulumi.get(self, "amount") @amount.setter def amount(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "amount", value) @property @pulumi.getter(name="coresPerLicense") def cores_per_license(self) -> Optional[pulumi.Input[str]]: """ Specifies the core range of the instance for which this license applies. """ return pulumi.get(self, "cores_per_license") @cores_per_license.setter def cores_per_license(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cores_per_license", value) @property @pulumi.getter def license(self) -> Optional[pulumi.Input[str]]: """ Any applicable license URI. """ return pulumi.get(self, "license") @license.setter def license(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "license", value) @pulumi.input_type class LicenseResourceRequirementsArgs: def __init__(__self__, *, min_guest_cpu_count: Optional[pulumi.Input[int]] = None, min_memory_mb: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] min_guest_cpu_count: Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start. :param pulumi.Input[int] min_memory_mb: Minimum memory required to use the Instance. Enforced at Instance creation and Instance start. """ if min_guest_cpu_count is not None: pulumi.set(__self__, "min_guest_cpu_count", min_guest_cpu_count) if min_memory_mb is not None: pulumi.set(__self__, "min_memory_mb", min_memory_mb) @property @pulumi.getter(name="minGuestCpuCount") def min_guest_cpu_count(self) -> Optional[pulumi.Input[int]]: """ Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start. 
""" return pulumi.get(self, "min_guest_cpu_count") @min_guest_cpu_count.setter def min_guest_cpu_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_guest_cpu_count", value) @property @pulumi.getter(name="minMemoryMb") def min_memory_mb(self) -> Optional[pulumi.Input[int]]: """ Minimum memory required to use the Instance. Enforced at Instance creation and Instance start. """ return pulumi.get(self, "min_memory_mb") @min_memory_mb.setter def min_memory_mb(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_memory_mb", value) @pulumi.input_type class LocalDiskArgs: def __init__(__self__, *, disk_count: Optional[pulumi.Input[int]] = None, disk_size_gb: Optional[pulumi.Input[int]] = None, disk_type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] disk_count: Specifies the number of such disks. :param pulumi.Input[int] disk_size_gb: Specifies the size of the disk in base-2 GB. :param pulumi.Input[str] disk_type: Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. """ if disk_count is not None: pulumi.set(__self__, "disk_count", disk_count) if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if disk_type is not None: pulumi.set(__self__, "disk_type", disk_type) @property @pulumi.getter(name="diskCount") def disk_count(self) -> Optional[pulumi.Input[int]]: """ Specifies the number of such disks. """ return pulumi.get(self, "disk_count") @disk_count.setter def disk_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "disk_count", value) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[int]]: """ Specifies the size of the disk in base-2 GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter(name="diskType") def disk_type(self) -> Optional[pulumi.Input[str]]: """ Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. """ return pulumi.get(self, "disk_type") @disk_type.setter def disk_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_type", value) @pulumi.input_type class LogConfigCloudAuditOptionsArgs: def __init__(__self__, *, authorization_logging_options: Optional[pulumi.Input['AuthorizationLoggingOptionsArgs']] = None, log_name: Optional[pulumi.Input['LogConfigCloudAuditOptionsLogName']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['AuthorizationLoggingOptionsArgs'] authorization_logging_options: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCloudAuditOptionsLogName'] log_name: This is deprecated and has no effect. Do not use. """ if authorization_logging_options is not None: pulumi.set(__self__, "authorization_logging_options", authorization_logging_options) if log_name is not None: pulumi.set(__self__, "log_name", log_name) @property @pulumi.getter(name="authorizationLoggingOptions") def authorization_logging_options(self) -> Optional[pulumi.Input['AuthorizationLoggingOptionsArgs']]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "authorization_logging_options") @authorization_logging_options.setter def authorization_logging_options(self, value: Optional[pulumi.Input['AuthorizationLoggingOptionsArgs']]): pulumi.set(self, "authorization_logging_options", value) @property @pulumi.getter(name="logName") def log_name(self) -> Optional[pulumi.Input['LogConfigCloudAuditOptionsLogName']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "log_name") @log_name.setter def log_name(self, value: Optional[pulumi.Input['LogConfigCloudAuditOptionsLogName']]): pulumi.set(self, "log_name", value) @pulumi.input_type class LogConfigCounterOptionsCustomFieldArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input[str] name: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] value: This is deprecated and has no effect. Do not use. """ if name is not None: pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class LogConfigCounterOptionsArgs: def __init__(__self__, *, custom_fields: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]]] = None, field: Optional[pulumi.Input[str]] = None, metric: Optional[pulumi.Input[str]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]] custom_fields: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] field: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] metric: This is deprecated and has no effect. Do not use. """ if custom_fields is not None: pulumi.set(__self__, "custom_fields", custom_fields) if field is not None: pulumi.set(__self__, "field", field) if metric is not None: pulumi.set(__self__, "metric", metric) @property @pulumi.getter(name="customFields") def custom_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "custom_fields") @custom_fields.setter def custom_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]]]): pulumi.set(self, "custom_fields", value) @property @pulumi.getter def field(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "field") @field.setter def field(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "field", value) @property @pulumi.getter def metric(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "metric") @metric.setter def metric(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "metric", value) @pulumi.input_type class LogConfigDataAccessOptionsArgs: def __init__(__self__, *, log_mode: Optional[pulumi.Input['LogConfigDataAccessOptionsLogMode']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigDataAccessOptionsLogMode'] log_mode: This is deprecated and has no effect. Do not use. """ if log_mode is not None: pulumi.set(__self__, "log_mode", log_mode) @property @pulumi.getter(name="logMode") def log_mode(self) -> Optional[pulumi.Input['LogConfigDataAccessOptionsLogMode']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "log_mode") @log_mode.setter def log_mode(self, value: Optional[pulumi.Input['LogConfigDataAccessOptionsLogMode']]): pulumi.set(self, "log_mode", value) @pulumi.input_type class LogConfigArgs: def __init__(__self__, *, cloud_audit: Optional[pulumi.Input['LogConfigCloudAuditOptionsArgs']] = None, counter: Optional[pulumi.Input['LogConfigCounterOptionsArgs']] = None, data_access: Optional[pulumi.Input['LogConfigDataAccessOptionsArgs']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCloudAuditOptionsArgs'] cloud_audit: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCounterOptionsArgs'] counter: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigDataAccessOptionsArgs'] data_access: This is deprecated and has no effect. Do not use. """ if cloud_audit is not None: pulumi.set(__self__, "cloud_audit", cloud_audit) if counter is not None: pulumi.set(__self__, "counter", counter) if data_access is not None: pulumi.set(__self__, "data_access", data_access) @property @pulumi.getter(name="cloudAudit") def cloud_audit(self) -> Optional[pulumi.Input['LogConfigCloudAuditOptionsArgs']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "cloud_audit") @cloud_audit.setter def cloud_audit(self, value: Optional[pulumi.Input['LogConfigCloudAuditOptionsArgs']]): pulumi.set(self, "cloud_audit", value) @property @pulumi.getter def counter(self) -> Optional[pulumi.Input['LogConfigCounterOptionsArgs']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "counter") @counter.setter def counter(self, value: Optional[pulumi.Input['LogConfigCounterOptionsArgs']]): pulumi.set(self, "counter", value) @property @pulumi.getter(name="dataAccess") def data_access(self) -> Optional[pulumi.Input['LogConfigDataAccessOptionsArgs']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "data_access") @data_access.setter def data_access(self, value: Optional[pulumi.Input['LogConfigDataAccessOptionsArgs']]): pulumi.set(self, "data_access", value) @pulumi.input_type class MetadataCredentialsFromPluginArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, struct_config: Optional[pulumi.Input[str]] = None): """ [Deprecated] Custom authenticator credentials. Custom authenticator credentials. :param pulumi.Input[str] name: Plugin name. :param pulumi.Input[str] struct_config: A text proto that conforms to a Struct type definition interpreted by the plugin. """ if name is not None: pulumi.set(__self__, "name", name) if struct_config is not None: pulumi.set(__self__, "struct_config", struct_config) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Plugin name. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="structConfig") def struct_config(self) -> Optional[pulumi.Input[str]]: """ A text proto that conforms to a Struct type definition interpreted by the plugin. """ return pulumi.get(self, "struct_config") @struct_config.setter def struct_config(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "struct_config", value) @pulumi.input_type class MetadataFilterLabelMatchArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the load balancer. :param pulumi.Input[str] name: Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long. :param pulumi.Input[str] value: The value of the label must match the specified value. value can have a maximum length of 1024 characters. """ if name is not None: pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ The value of the label must match the specified value. value can have a maximum length of 1024 characters. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class MetadataFilterArgs: def __init__(__self__, *, filter_labels: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]]] = None, filter_match_criteria: Optional[pulumi.Input['MetadataFilterFilterMatchCriteria']] = None): """ Opaque filter criteria used by load balancers to restrict routing configuration to a limited set of load balancing proxies. Proxies and sidecars involved in load balancing would typically present metadata to the load balancers that need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. An example for using metadataFilters would be: if load balancing involves Envoys, they receive routing configuration when values in metadataFilters match values supplied in of their XDS requests to loadbalancers. :param pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]] filter_labels: The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries. :param pulumi.Input['MetadataFilterFilterMatchCriteria'] filter_match_criteria: Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. 
Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. """ if filter_labels is not None: pulumi.set(__self__, "filter_labels", filter_labels) if filter_match_criteria is not None: pulumi.set(__self__, "filter_match_criteria", filter_match_criteria) @property @pulumi.getter(name="filterLabels") def filter_labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]]]: """ The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries. """ return pulumi.get(self, "filter_labels") @filter_labels.setter def filter_labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]]]): pulumi.set(self, "filter_labels", value) @property @pulumi.getter(name="filterMatchCriteria") def filter_match_criteria(self) -> Optional[pulumi.Input['MetadataFilterFilterMatchCriteria']]: """ Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. """ return pulumi.get(self, "filter_match_criteria") @filter_match_criteria.setter def filter_match_criteria(self, value: Optional[pulumi.Input['MetadataFilterFilterMatchCriteria']]): pulumi.set(self, "filter_match_criteria", value) @pulumi.input_type class MetadataItemsItemArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ Metadata :param pulumi.Input[str] key: Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. :param pulumi.Input[str] value: Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB). """ if key is not None: pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB). 
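        Example (an illustrative sketch, not generated documentation): a startup-script entry wrapped in `MetadataArgs` (defined below in this module). The key matches [a-zA-Z0-9-_]+ and the script body is a placeholder well under the 256 KiB limit:

            metadata = MetadataArgs(items=[
                MetadataItemsItemArgs(
                    key="startup-script",
                    value="#!/bin/bash\necho 'hello' > /tmp/hello.txt",
                ),
            ])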
""" return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class MetadataArgs: def __init__(__self__, *, items: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]]] = None): """ A metadata key/value entry. :param pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]] items: Array of key/value pairs. The total size of all keys and values must be less than 512 KB. """ if items is not None: pulumi.set(__self__, "items", items) @property @pulumi.getter def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]]]: """ Array of key/value pairs. The total size of all keys and values must be less than 512 KB. """ return pulumi.get(self, "items") @items.setter def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]]]): pulumi.set(self, "items", value) @pulumi.input_type class NamedPortArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None): """ The named port. For example: <"http", 80>. :param pulumi.Input[str] name: The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. :param pulumi.Input[int] port: The port number, which can be a value between 1 and 65535. """ if name is not None: pulumi.set(__self__, "name", name) if port is not None: pulumi.set(__self__, "port", port) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The port number, which can be a value between 1 and 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @pulumi.input_type class NetworkEndpointGroupAppEngineArgs: def __init__(__self__, *, service: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[str]] = None): """ Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] service: Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". :param pulumi.Input[str] url_mask: A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. :param pulumi.Input[str] version: Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". 
""" if service is not None: pulumi.set(__self__, "service", service) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) if version is not None: pulumi.set(__self__, "version", version) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. """ return pulumi.get(self, "url_mask") @url_mask.setter def url_mask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_mask", value) @property @pulumi.getter def version(self) -> Optional[pulumi.Input[str]]: """ Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". """ return pulumi.get(self, "version") @version.setter def version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "version", value) @pulumi.input_type class NetworkEndpointGroupCloudFunctionArgs: def __init__(__self__, *, function: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None): """ Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] function: A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". :param pulumi.Input[str] url_mask: A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. """ if function is not None: pulumi.set(__self__, "function", function) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) @property @pulumi.getter def function(self) -> Optional[pulumi.Input[str]]: """ A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". """ return pulumi.get(self, "function") @function.setter def function(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "function", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. 
For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. """ return pulumi.get(self, "url_mask") @url_mask.setter def url_mask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_mask", value) @pulumi.input_type class NetworkEndpointGroupCloudRunArgs: def __init__(__self__, *, service: Optional[pulumi.Input[str]] = None, tag: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None): """ Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] service: Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". :param pulumi.Input[str] tag: Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". :param pulumi.Input[str] url_mask: A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. """ if service is not None: pulumi.set(__self__, "service", service) if tag is not None: pulumi.set(__self__, "tag", tag) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter def tag(self) -> Optional[pulumi.Input[str]]: """ Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". """ return pulumi.get(self, "tag") @tag.setter def tag(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tag", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. 
""" return pulumi.get(self, "url_mask") @url_mask.setter def url_mask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_mask", value) @pulumi.input_type class NetworkEndpointGroupServerlessDeploymentArgs: def __init__(__self__, *, platform: Optional[pulumi.Input[str]] = None, resource: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[str]] = None): """ Configuration for a serverless network endpoint group (NEG). The platform must be provided. Note: The target backend service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] platform: The platform of the backend target(s) of this NEG. Possible values include: 1. API Gateway: apigateway.googleapis.com 2. App Engine: appengine.googleapis.com 3. Cloud Functions: cloudfunctions.googleapis.com 4. Cloud Run: run.googleapis.com :param pulumi.Input[str] resource: The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. The resource identified by this value is platform-specific and is as follows: 1. API Gateway: The gateway ID 2. App Engine: The service name 3. Cloud Functions: The function name 4. Cloud Run: The service name :param pulumi.Input[str] url_mask: A template to parse platform-specific fields from a request URL. URL mask allows for routing to multiple resources on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. The fields parsed by this template are platform-specific and are as follows: 1. API Gateway: The gateway ID 2. App Engine: The service and version 3. Cloud Functions: The function name 4. Cloud Run: The service and tag :param pulumi.Input[str] version: The optional resource version. The version identified by this value is platform-specific and is follows: 1. API Gateway: Unused 2. App Engine: The service version 3. Cloud Functions: Unused 4. Cloud Run: The service tag """ if platform is not None: pulumi.set(__self__, "platform", platform) if resource is not None: pulumi.set(__self__, "resource", resource) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) if version is not None: pulumi.set(__self__, "version", version) @property @pulumi.getter def platform(self) -> Optional[pulumi.Input[str]]: """ The platform of the backend target(s) of this NEG. Possible values include: 1. API Gateway: apigateway.googleapis.com 2. App Engine: appengine.googleapis.com 3. Cloud Functions: cloudfunctions.googleapis.com 4. Cloud Run: run.googleapis.com """ return pulumi.get(self, "platform") @platform.setter def platform(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "platform", value) @property @pulumi.getter def resource(self) -> Optional[pulumi.Input[str]]: """ The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. The resource identified by this value is platform-specific and is as follows: 1. API Gateway: The gateway ID 2. App Engine: The service name 3. Cloud Functions: The function name 4. Cloud Run: The service name """ return pulumi.get(self, "resource") @resource.setter def resource(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "resource", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse platform-specific fields from a request URL. 
URL mask allows for routing to multiple resources on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. The fields parsed by this template are platform-specific and are as follows: 1. API Gateway: The gateway ID 2. App Engine: The service and version 3. Cloud Functions: The function name 4. Cloud Run: The service and tag
        """
        return pulumi.get(self, "url_mask")

    @url_mask.setter
    def url_mask(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url_mask", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        The optional resource version. The version identified by this value is platform-specific and is as follows: 1. API Gateway: Unused 2. App Engine: The service version 3. Cloud Functions: Unused 4. Cloud Run: The service tag
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)


@pulumi.input_type
class NetworkInterfaceSubInterfaceArgs:
    def __init__(__self__, *,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 ip_allocation_mode: Optional[pulumi.Input['NetworkInterfaceSubInterfaceIpAllocationMode']] = None,
                 subnetwork: Optional[pulumi.Input[str]] = None,
                 vlan: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] ip_address: An IPv4 internal IP address to assign to the instance for this subinterface. If specified, ip_allocation_mode should be set to ALLOCATE_IP.
        :param pulumi.Input[str] subnetwork: If specified, this subnetwork must belong to the same network as that of the network interface. If not specified, the subnet of the network interface will be used. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork
        :param pulumi.Input[int] vlan: VLAN tag. Should match the VLAN(s) supported by the subnetwork to which this subinterface is connecting.
        """
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if ip_allocation_mode is not None:
            pulumi.set(__self__, "ip_allocation_mode", ip_allocation_mode)
        if subnetwork is not None:
            pulumi.set(__self__, "subnetwork", subnetwork)
        if vlan is not None:
            pulumi.set(__self__, "vlan", vlan)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        An IPv4 internal IP address to assign to the instance for this subinterface. If specified, ip_allocation_mode should be set to ALLOCATE_IP.
        """
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter(name="ipAllocationMode")
    def ip_allocation_mode(self) -> Optional[pulumi.Input['NetworkInterfaceSubInterfaceIpAllocationMode']]:
        return pulumi.get(self, "ip_allocation_mode")

    @ip_allocation_mode.setter
    def ip_allocation_mode(self, value: Optional[pulumi.Input['NetworkInterfaceSubInterfaceIpAllocationMode']]):
        pulumi.set(self, "ip_allocation_mode", value)

    @property
    @pulumi.getter
    def subnetwork(self) -> Optional[pulumi.Input[str]]:
        """
        If specified, this subnetwork must belong to the same network as that of the network interface. If not specified, the subnet of the network interface will be used. If you specify this property, you can specify the subnetwork as a full or partial URL.
For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork """ return pulumi.get(self, "subnetwork") @subnetwork.setter def subnetwork(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork", value) @property @pulumi.getter def vlan(self) -> Optional[pulumi.Input[int]]: """ VLAN tag. Should match the VLAN(s) supported by the subnetwork to which this subinterface is connecting. """ return pulumi.get(self, "vlan") @vlan.setter def vlan(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "vlan", value) @pulumi.input_type class NetworkInterfaceArgs: def __init__(__self__, *, access_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]] = None, alias_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]]] = None, internal_ipv6_prefix_length: Optional[pulumi.Input[int]] = None, ipv6_access_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]] = None, ipv6_address: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, network_ip: Optional[pulumi.Input[str]] = None, nic_type: Optional[pulumi.Input['NetworkInterfaceNicType']] = None, queue_count: Optional[pulumi.Input[int]] = None, stack_type: Optional[pulumi.Input['NetworkInterfaceStackType']] = None, subinterfaces: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]]] = None, subnetwork: Optional[pulumi.Input[str]] = None): """ A network interface resource attached to an instance. :param pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]] access_configs: An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. :param pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]] alias_ip_ranges: An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. :param pulumi.Input[int] internal_ipv6_prefix_length: The prefix length of the primary internal IPv6 range. :param pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]] ipv6_access_configs: An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. :param pulumi.Input[str] ipv6_address: An IPv6 internal network address for this network interface. :param pulumi.Input[str] network: URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default :param pulumi.Input[str] network_ip: An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. 
:param pulumi.Input['NetworkInterfaceNicType'] nic_type: The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. :param pulumi.Input[int] queue_count: The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. :param pulumi.Input['NetworkInterfaceStackType'] stack_type: The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations. :param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]] subinterfaces: SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. :param pulumi.Input[str] subnetwork: The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork """ if access_configs is not None: pulumi.set(__self__, "access_configs", access_configs) if alias_ip_ranges is not None: pulumi.set(__self__, "alias_ip_ranges", alias_ip_ranges) if internal_ipv6_prefix_length is not None: pulumi.set(__self__, "internal_ipv6_prefix_length", internal_ipv6_prefix_length) if ipv6_access_configs is not None: pulumi.set(__self__, "ipv6_access_configs", ipv6_access_configs) if ipv6_address is not None: pulumi.set(__self__, "ipv6_address", ipv6_address) if network is not None: pulumi.set(__self__, "network", network) if network_ip is not None: pulumi.set(__self__, "network_ip", network_ip) if nic_type is not None: pulumi.set(__self__, "nic_type", nic_type) if queue_count is not None: pulumi.set(__self__, "queue_count", queue_count) if stack_type is not None: pulumi.set(__self__, "stack_type", stack_type) if subinterfaces is not None: pulumi.set(__self__, "subinterfaces", subinterfaces) if subnetwork is not None: pulumi.set(__self__, "subnetwork", subnetwork) @property @pulumi.getter(name="accessConfigs") def access_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]: """ An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. """ return pulumi.get(self, "access_configs") @access_configs.setter def access_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]): pulumi.set(self, "access_configs", value) @property @pulumi.getter(name="aliasIpRanges") def alias_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]]]: """ An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. 
""" return pulumi.get(self, "alias_ip_ranges") @alias_ip_ranges.setter def alias_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]]]): pulumi.set(self, "alias_ip_ranges", value) @property @pulumi.getter(name="internalIpv6PrefixLength") def internal_ipv6_prefix_length(self) -> Optional[pulumi.Input[int]]: """ The prefix length of the primary internal IPv6 range. """ return pulumi.get(self, "internal_ipv6_prefix_length") @internal_ipv6_prefix_length.setter def internal_ipv6_prefix_length(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "internal_ipv6_prefix_length", value) @property @pulumi.getter(name="ipv6AccessConfigs") def ipv6_access_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]: """ An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. """ return pulumi.get(self, "ipv6_access_configs") @ipv6_access_configs.setter def ipv6_access_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]): pulumi.set(self, "ipv6_access_configs", value) @property @pulumi.getter(name="ipv6Address") def ipv6_address(self) -> Optional[pulumi.Input[str]]: """ An IPv6 internal network address for this network interface. """ return pulumi.get(self, "ipv6_address") @ipv6_address.setter def ipv6_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ipv6_address", value) @property @pulumi.getter def network(self) -> Optional[pulumi.Input[str]]: """ URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default """ return pulumi.get(self, "network") @network.setter def network(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network", value) @property @pulumi.getter(name="networkIP") def network_ip(self) -> Optional[pulumi.Input[str]]: """ An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. """ return pulumi.get(self, "network_ip") @network_ip.setter def network_ip(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network_ip", value) @property @pulumi.getter(name="nicType") def nic_type(self) -> Optional[pulumi.Input['NetworkInterfaceNicType']]: """ The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. """ return pulumi.get(self, "nic_type") @nic_type.setter def nic_type(self, value: Optional[pulumi.Input['NetworkInterfaceNicType']]): pulumi.set(self, "nic_type", value) @property @pulumi.getter(name="queueCount") def queue_count(self) -> Optional[pulumi.Input[int]]: """ The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. 
""" return pulumi.get(self, "queue_count") @queue_count.setter def queue_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "queue_count", value) @property @pulumi.getter(name="stackType") def stack_type(self) -> Optional[pulumi.Input['NetworkInterfaceStackType']]: """ The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations. """ return pulumi.get(self, "stack_type") @stack_type.setter def stack_type(self, value: Optional[pulumi.Input['NetworkInterfaceStackType']]): pulumi.set(self, "stack_type", value) @property @pulumi.getter def subinterfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]]]: """ SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. """ return pulumi.get(self, "subinterfaces") @subinterfaces.setter def subinterfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]]]): pulumi.set(self, "subinterfaces", value) @property @pulumi.getter def subnetwork(self) -> Optional[pulumi.Input[str]]: """ The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. 
        For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - regions/region/subnetworks/subnetwork
        """
        return pulumi.get(self, "subnetwork")

    @subnetwork.setter
    def subnetwork(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnetwork", value)


@pulumi.input_type
class NetworkPerformanceConfigArgs:
    def __init__(__self__, *,
                 external_ip_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']] = None,
                 total_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']] = None):
        if external_ip_egress_bandwidth_tier is not None:
            pulumi.set(__self__, "external_ip_egress_bandwidth_tier", external_ip_egress_bandwidth_tier)
        if total_egress_bandwidth_tier is not None:
            pulumi.set(__self__, "total_egress_bandwidth_tier", total_egress_bandwidth_tier)

    @property
    @pulumi.getter(name="externalIpEgressBandwidthTier")
    def external_ip_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']]:
        return pulumi.get(self, "external_ip_egress_bandwidth_tier")

    @external_ip_egress_bandwidth_tier.setter
    def external_ip_egress_bandwidth_tier(self, value: Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']]):
        pulumi.set(self, "external_ip_egress_bandwidth_tier", value)

    @property
    @pulumi.getter(name="totalEgressBandwidthTier")
    def total_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']]:
        return pulumi.get(self, "total_egress_bandwidth_tier")

    @total_egress_bandwidth_tier.setter
    def total_egress_bandwidth_tier(self, value: Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']]):
        pulumi.set(self, "total_egress_bandwidth_tier", value)


@pulumi.input_type
class NetworkRoutingConfigArgs:
    def __init__(__self__, *,
                 routing_mode: Optional[pulumi.Input['NetworkRoutingConfigRoutingMode']] = None):
        """
        A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.
        :param pulumi.Input['NetworkRoutingConfigRoutingMode'] routing_mode: The network-wide routing mode to use. If set to REGIONAL, this network's Cloud Routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's Cloud Routers will advertise routes with all subnets of this network, across regions.
        """
        if routing_mode is not None:
            pulumi.set(__self__, "routing_mode", routing_mode)

    @property
    @pulumi.getter(name="routingMode")
    def routing_mode(self) -> Optional[pulumi.Input['NetworkRoutingConfigRoutingMode']]:
        """
        The network-wide routing mode to use. If set to REGIONAL, this network's Cloud Routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's Cloud Routers will advertise routes with all subnets of this network, across regions.
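
        Example (an illustrative sketch, not generated documentation; it assumes the GLOBAL member generated for NetworkRoutingConfigRoutingMode)::

            routing = NetworkRoutingConfigArgs(
                routing_mode=NetworkRoutingConfigRoutingMode.GLOBAL,  # advertise all subnets of the network, across regions
            )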
""" return pulumi.get(self, "routing_mode") @routing_mode.setter def routing_mode(self, value: Optional[pulumi.Input['NetworkRoutingConfigRoutingMode']]): pulumi.set(self, "routing_mode", value) @pulumi.input_type class NodeGroupAutoscalingPolicyArgs: def __init__(__self__, *, max_nodes: Optional[pulumi.Input[int]] = None, min_nodes: Optional[pulumi.Input[int]] = None, mode: Optional[pulumi.Input['NodeGroupAutoscalingPolicyMode']] = None): """ :param pulumi.Input[int] max_nodes: The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100. :param pulumi.Input[int] min_nodes: The minimum number of nodes that the group should have. :param pulumi.Input['NodeGroupAutoscalingPolicyMode'] mode: The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For more information, see Autoscaler modes. """ if max_nodes is not None: pulumi.set(__self__, "max_nodes", max_nodes) if min_nodes is not None: pulumi.set(__self__, "min_nodes", min_nodes) if mode is not None: pulumi.set(__self__, "mode", mode) @property @pulumi.getter(name="maxNodes") def max_nodes(self) -> Optional[pulumi.Input[int]]: """ The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100. """ return pulumi.get(self, "max_nodes") @max_nodes.setter def max_nodes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_nodes", value) @property @pulumi.getter(name="minNodes") def min_nodes(self) -> Optional[pulumi.Input[int]]: """ The minimum number of nodes that the group should have. """ return pulumi.get(self, "min_nodes") @min_nodes.setter def min_nodes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_nodes", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['NodeGroupAutoscalingPolicyMode']]: """ The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For more information, see Autoscaler modes. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['NodeGroupAutoscalingPolicyMode']]): pulumi.set(self, "mode", value) @pulumi.input_type class NodeGroupMaintenanceWindowArgs: def __init__(__self__, *, start_time: Optional[pulumi.Input[str]] = None): """ Time window specified for daily maintenance operations. GCE's internal maintenance will be performed within this window. :param pulumi.Input[str] start_time: Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. """ if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. 
""" return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class NodeTemplateNodeTypeFlexibilityArgs: def __init__(__self__, *, cpus: Optional[pulumi.Input[str]] = None, local_ssd: Optional[pulumi.Input[str]] = None, memory: Optional[pulumi.Input[str]] = None): if cpus is not None: pulumi.set(__self__, "cpus", cpus) if local_ssd is not None: pulumi.set(__self__, "local_ssd", local_ssd) if memory is not None: pulumi.set(__self__, "memory", memory) @property @pulumi.getter def cpus(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "cpus") @cpus.setter def cpus(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cpus", value) @property @pulumi.getter(name="localSsd") def local_ssd(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "local_ssd") @local_ssd.setter def local_ssd(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "local_ssd", value) @property @pulumi.getter def memory(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "memory") @memory.setter def memory(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "memory", value) @pulumi.input_type class NotificationEndpointGrpcSettingsArgs: def __init__(__self__, *, authority: Optional[pulumi.Input[str]] = None, endpoint: Optional[pulumi.Input[str]] = None, payload_name: Optional[pulumi.Input[str]] = None, resend_interval: Optional[pulumi.Input['DurationArgs']] = None, retry_duration_sec: Optional[pulumi.Input[int]] = None): """ Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint. :param pulumi.Input[str] authority: Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 :param pulumi.Input[str] endpoint: Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. :param pulumi.Input[str] payload_name: Optional. If specified, this field is used to populate the "name" field in gRPC requests. :param pulumi.Input['DurationArgs'] resend_interval: Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. :param pulumi.Input[int] retry_duration_sec: How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. """ if authority is not None: pulumi.set(__self__, "authority", authority) if endpoint is not None: pulumi.set(__self__, "endpoint", endpoint) if payload_name is not None: pulumi.set(__self__, "payload_name", payload_name) if resend_interval is not None: pulumi.set(__self__, "resend_interval", resend_interval) if retry_duration_sec is not None: pulumi.set(__self__, "retry_duration_sec", retry_duration_sec) @property @pulumi.getter def authority(self) -> Optional[pulumi.Input[str]]: """ Optional. If specified, this field is used to set the authority header by the sender of notifications. 
See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 """ return pulumi.get(self, "authority") @authority.setter def authority(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "authority", value) @property @pulumi.getter def endpoint(self) -> Optional[pulumi.Input[str]]: """ Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. """ return pulumi.get(self, "endpoint") @endpoint.setter def endpoint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "endpoint", value) @property @pulumi.getter(name="payloadName") def payload_name(self) -> Optional[pulumi.Input[str]]: """ Optional. If specified, this field is used to populate the "name" field in gRPC requests. """ return pulumi.get(self, "payload_name") @payload_name.setter def payload_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "payload_name", value) @property @pulumi.getter(name="resendInterval") def resend_interval(self) -> Optional[pulumi.Input['DurationArgs']]: """ Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. """ return pulumi.get(self, "resend_interval") @resend_interval.setter def resend_interval(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "resend_interval", value) @property @pulumi.getter(name="retryDurationSec") def retry_duration_sec(self) -> Optional[pulumi.Input[int]]: """ How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. """ return pulumi.get(self, "retry_duration_sec") @retry_duration_sec.setter def retry_duration_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "retry_duration_sec", value) @pulumi.input_type class OutlierDetectionArgs: def __init__(__self__, *, base_ejection_time: Optional[pulumi.Input['DurationArgs']] = None, consecutive_errors: Optional[pulumi.Input[int]] = None, consecutive_gateway_failure: Optional[pulumi.Input[int]] = None, enforcing_consecutive_errors: Optional[pulumi.Input[int]] = None, enforcing_consecutive_gateway_failure: Optional[pulumi.Input[int]] = None, enforcing_success_rate: Optional[pulumi.Input[int]] = None, interval: Optional[pulumi.Input['DurationArgs']] = None, max_ejection_percent: Optional[pulumi.Input[int]] = None, success_rate_minimum_hosts: Optional[pulumi.Input[int]] = None, success_rate_request_volume: Optional[pulumi.Input[int]] = None, success_rate_stdev_factor: Optional[pulumi.Input[int]] = None): """ Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. :param pulumi.Input['DurationArgs'] base_ejection_time: The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s. :param pulumi.Input[int] consecutive_errors: Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. :param pulumi.Input[int] consecutive_gateway_failure: The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. 
:param pulumi.Input[int] enforcing_consecutive_errors: The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. :param pulumi.Input[int] enforcing_consecutive_gateway_failure: The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. :param pulumi.Input[int] enforcing_success_rate: The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. :param pulumi.Input['DurationArgs'] interval: Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second. :param pulumi.Input[int] max_ejection_percent: Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%. :param pulumi.Input[int] success_rate_minimum_hosts: The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5. :param pulumi.Input[int] success_rate_request_volume: The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100. :param pulumi.Input[int] success_rate_stdev_factor: This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900. 
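
        Example (an illustrative sketch, not generated documentation; it assumes this module's DurationArgs encodes seconds as a string, per the API's int64 Duration encoding)::

            detection = OutlierDetectionArgs(
                consecutive_errors=5,  # eject a host after five consecutive 5xx responses
                base_ejection_time=DurationArgs(seconds='30'),  # 30s base time, multiplied per ejection
                max_ejection_percent=50,  # never eject more than half of the pool
            )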
""" if base_ejection_time is not None: pulumi.set(__self__, "base_ejection_time", base_ejection_time) if consecutive_errors is not None: pulumi.set(__self__, "consecutive_errors", consecutive_errors) if consecutive_gateway_failure is not None: pulumi.set(__self__, "consecutive_gateway_failure", consecutive_gateway_failure) if enforcing_consecutive_errors is not None: pulumi.set(__self__, "enforcing_consecutive_errors", enforcing_consecutive_errors) if enforcing_consecutive_gateway_failure is not None: pulumi.set(__self__, "enforcing_consecutive_gateway_failure", enforcing_consecutive_gateway_failure) if enforcing_success_rate is not None: pulumi.set(__self__, "enforcing_success_rate", enforcing_success_rate) if interval is not None: pulumi.set(__self__, "interval", interval) if max_ejection_percent is not None: pulumi.set(__self__, "max_ejection_percent", max_ejection_percent) if success_rate_minimum_hosts is not None: pulumi.set(__self__, "success_rate_minimum_hosts", success_rate_minimum_hosts) if success_rate_request_volume is not None: pulumi.set(__self__, "success_rate_request_volume", success_rate_request_volume) if success_rate_stdev_factor is not None: pulumi.set(__self__, "success_rate_stdev_factor", success_rate_stdev_factor) @property @pulumi.getter(name="baseEjectionTime") def base_ejection_time(self) -> Optional[pulumi.Input['DurationArgs']]: """ The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s. """ return pulumi.get(self, "base_ejection_time") @base_ejection_time.setter def base_ejection_time(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "base_ejection_time", value) @property @pulumi.getter(name="consecutiveErrors") def consecutive_errors(self) -> Optional[pulumi.Input[int]]: """ Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. """ return pulumi.get(self, "consecutive_errors") @consecutive_errors.setter def consecutive_errors(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "consecutive_errors", value) @property @pulumi.getter(name="consecutiveGatewayFailure") def consecutive_gateway_failure(self) -> Optional[pulumi.Input[int]]: """ The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. """ return pulumi.get(self, "consecutive_gateway_failure") @consecutive_gateway_failure.setter def consecutive_gateway_failure(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "consecutive_gateway_failure", value) @property @pulumi.getter(name="enforcingConsecutiveErrors") def enforcing_consecutive_errors(self) -> Optional[pulumi.Input[int]]: """ The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. 
""" return pulumi.get(self, "enforcing_consecutive_errors") @enforcing_consecutive_errors.setter def enforcing_consecutive_errors(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "enforcing_consecutive_errors", value) @property @pulumi.getter(name="enforcingConsecutiveGatewayFailure") def enforcing_consecutive_gateway_failure(self) -> Optional[pulumi.Input[int]]: """ The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. """ return pulumi.get(self, "enforcing_consecutive_gateway_failure") @enforcing_consecutive_gateway_failure.setter def enforcing_consecutive_gateway_failure(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "enforcing_consecutive_gateway_failure", value) @property @pulumi.getter(name="enforcingSuccessRate") def enforcing_success_rate(self) -> Optional[pulumi.Input[int]]: """ The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. """ return pulumi.get(self, "enforcing_success_rate") @enforcing_success_rate.setter def enforcing_success_rate(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "enforcing_success_rate", value) @property @pulumi.getter def interval(self) -> Optional[pulumi.Input['DurationArgs']]: """ Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second. """ return pulumi.get(self, "interval") @interval.setter def interval(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "interval", value) @property @pulumi.getter(name="maxEjectionPercent") def max_ejection_percent(self) -> Optional[pulumi.Input[int]]: """ Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%. """ return pulumi.get(self, "max_ejection_percent") @max_ejection_percent.setter def max_ejection_percent(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ejection_percent", value) @property @pulumi.getter(name="successRateMinimumHosts") def success_rate_minimum_hosts(self) -> Optional[pulumi.Input[int]]: """ The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5. """ return pulumi.get(self, "success_rate_minimum_hosts") @success_rate_minimum_hosts.setter def success_rate_minimum_hosts(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "success_rate_minimum_hosts", value) @property @pulumi.getter(name="successRateRequestVolume") def success_rate_request_volume(self) -> Optional[pulumi.Input[int]]: """ The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100. 
""" return pulumi.get(self, "success_rate_request_volume") @success_rate_request_volume.setter def success_rate_request_volume(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "success_rate_request_volume", value) @property @pulumi.getter(name="successRateStdevFactor") def success_rate_stdev_factor(self) -> Optional[pulumi.Input[int]]: """ This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900. """ return pulumi.get(self, "success_rate_stdev_factor") @success_rate_stdev_factor.setter def success_rate_stdev_factor(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "success_rate_stdev_factor", value) @pulumi.input_type class PacketMirroringFilterArgs: def __init__(__self__, *, cidr_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, direction: Optional[pulumi.Input['PacketMirroringFilterDirection']] = None, ip_protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_ranges: IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. :param pulumi.Input['PacketMirroringFilterDirection'] direction: Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_protocols: Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. """ if cidr_ranges is not None: pulumi.set(__self__, "cidr_ranges", cidr_ranges) if direction is not None: pulumi.set(__self__, "direction", direction) if ip_protocols is not None: pulumi.set(__self__, "ip_protocols", ip_protocols) @property @pulumi.getter(name="cidrRanges") def cidr_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. """ return pulumi.get(self, "cidr_ranges") @cidr_ranges.setter def cidr_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "cidr_ranges", value) @property @pulumi.getter def direction(self) -> Optional[pulumi.Input['PacketMirroringFilterDirection']]: """ Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. """ return pulumi.get(self, "direction") @direction.setter def direction(self, value: Optional[pulumi.Input['PacketMirroringFilterDirection']]): pulumi.set(self, "direction", value) @property @pulumi.getter(name="ipProtocols") def ip_protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Protocols that apply as filter on mirrored traffic. 
If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. """ return pulumi.get(self, "ip_protocols") @ip_protocols.setter def ip_protocols(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ip_protocols", value) @pulumi.input_type class PacketMirroringForwardingRuleInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PacketMirroringMirroredResourceInfoInstanceInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: Resource URL to the virtual machine instance which is being mirrored. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ Resource URL to the virtual machine instance which is being mirrored. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PacketMirroringMirroredResourceInfoSubnetInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PacketMirroringMirroredResourceInfoArgs: def __init__(__self__, *, instances: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]]] = None, subnetworks: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]] instances: A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. :param pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]] subnetworks: A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored. 
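
        Example (an illustrative sketch, not generated documentation; the instance URL and tag are hypothetical)::

            mirrored = PacketMirroringMirroredResourceInfoArgs(
                instances=[PacketMirroringMirroredResourceInfoInstanceInfoArgs(
                    url="zones/us-central1-a/instances/my-instance",
                )],
                tags=["web"],  # also mirror every instance carrying this tag
            )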
""" if instances is not None: pulumi.set(__self__, "instances", instances) if subnetworks is not None: pulumi.set(__self__, "subnetworks", subnetworks) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]]]: """ A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. """ return pulumi.get(self, "instances") @instances.setter def instances(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]]]): pulumi.set(self, "instances", value) @property @pulumi.getter def subnetworks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]]]: """ A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. """ return pulumi.get(self, "subnetworks") @subnetworks.setter def subnetworks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]]]): pulumi.set(self, "subnetworks", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @pulumi.input_type class PacketMirroringNetworkInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: URL of the network resource. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ URL of the network resource. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PathMatcherArgs: def __init__(__self__, *, default_route_action: Optional[pulumi.Input['HttpRouteActionArgs']] = None, default_service: Optional[pulumi.Input[str]] = None, default_url_redirect: Optional[pulumi.Input['HttpRedirectActionArgs']] = None, description: Optional[pulumi.Input[str]] = None, header_action: Optional[pulumi.Input['HttpHeaderActionArgs']] = None, name: Optional[pulumi.Input[str]] = None, path_rules: Optional[pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]]] = None, route_rules: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]]] = None): """ A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service is used. :param pulumi.Input['HttpRouteActionArgs'] default_route_action: defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
        Conversely, if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction.
        :param pulumi.Input[str] default_service: The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect, or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use
        :param pulumi.Input['HttpRedirectActionArgs'] default_url_redirect: When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set. Not supported when the URL map is bound to a target gRPC proxy.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
        :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backend service. HeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap. HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true.
        :param pulumi.Input[str] name: The name to which this PathMatcher is referred by the HostRule.
        :param pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]] path_rules: The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list. Within a given pathMatcher, only one of pathRules or routeRules must be set.
        :param pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]] route_rules: The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number. Within a given pathMatcher, you can set only one of pathRules or routeRules.
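
        Example (an illustrative sketch, not generated documentation; the backend service URLs are hypothetical). Because pathRules and routeRules are mutually exclusive, only pathRules is set::

            matcher = PathMatcherArgs(
                name="images-matcher",
                default_service="global/backendServices/default-backend",
                path_rules=[PathRuleArgs(
                    paths=["/images/*"],  # matched longest-path-first
                    service="global/backendServices/images-backend",
                )],
            )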
""" if default_route_action is not None: pulumi.set(__self__, "default_route_action", default_route_action) if default_service is not None: pulumi.set(__self__, "default_service", default_service) if default_url_redirect is not None: pulumi.set(__self__, "default_url_redirect", default_url_redirect) if description is not None: pulumi.set(__self__, "description", description) if header_action is not None: pulumi.set(__self__, "header_action", header_action) if name is not None: pulumi.set(__self__, "name", name) if path_rules is not None: pulumi.set(__self__, "path_rules", path_rules) if route_rules is not None: pulumi.set(__self__, "route_rules", route_rules) @property @pulumi.getter(name="defaultRouteAction") def default_route_action(self) -> Optional[pulumi.Input['HttpRouteActionArgs']]: """ defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction. """ return pulumi.get(self, "default_route_action") @default_route_action.setter def default_route_action(self, value: Optional[pulumi.Input['HttpRouteActionArgs']]): pulumi.set(self, "default_route_action", value) @property @pulumi.getter(name="defaultService") def default_service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use """ return pulumi.get(self, "default_service") @default_service.setter def default_service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_service", value) @property @pulumi.getter(name="defaultUrlRedirect") def default_url_redirect(self) -> Optional[pulumi.Input['HttpRedirectActionArgs']]: """ When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. 
""" return pulumi.get(self, "default_url_redirect") @default_url_redirect.setter def default_url_redirect(self, value: Optional[pulumi.Input['HttpRedirectActionArgs']]): pulumi.set(self, "default_url_redirect", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="headerAction") def header_action(self) -> Optional[pulumi.Input['HttpHeaderActionArgs']]: """ Specifies changes to request and response headers that need to take effect for the selected backend service. HeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "header_action") @header_action.setter def header_action(self, value: Optional[pulumi.Input['HttpHeaderActionArgs']]): pulumi.set(self, "header_action", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name to which this PathMatcher is referred by the HostRule. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="pathRules") def path_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]]]: """ The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list. Within a given pathMatcher, only one of pathRules or routeRules must be set. """ return pulumi.get(self, "path_rules") @path_rules.setter def path_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]]]): pulumi.set(self, "path_rules", value) @property @pulumi.getter(name="routeRules") def route_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]]]: """ The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number. Within a given pathMatcher, you can set only one of pathRules or routeRules. """ return pulumi.get(self, "route_rules") @route_rules.setter def route_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]]]): pulumi.set(self, "route_rules", value) @pulumi.input_type class PathRuleArgs: def __init__(__self__, *, paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, route_action: Optional[pulumi.Input['HttpRouteActionArgs']] = None, service: Optional[pulumi.Input[str]] = None, url_redirect: Optional[pulumi.Input['HttpRedirectActionArgs']] = None): """ A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL. :param pulumi.Input[Sequence[pulumi.Input[str]]] paths: The list of path patterns to match. 
Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here. :param pulumi.Input['HttpRouteActionArgs'] route_action: In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction. :param pulumi.Input[str] service: The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. :param pulumi.Input['HttpRedirectActionArgs'] url_redirect: When a path pattern is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ if paths is not None: pulumi.set(__self__, "paths", paths) if route_action is not None: pulumi.set(__self__, "route_action", route_action) if service is not None: pulumi.set(__self__, "service", service) if url_redirect is not None: pulumi.set(__self__, "url_redirect", url_redirect) @property @pulumi.getter def paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here. """ return pulumi.get(self, "paths") @paths.setter def paths(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "paths", value) @property @pulumi.getter(name="routeAction") def route_action(self) -> Optional[pulumi.Input['HttpRouteActionArgs']]: """ In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction. """ return pulumi.get(self, "route_action") @route_action.setter def route_action(self, value: Optional[pulumi.Input['HttpRouteActionArgs']]): pulumi.set(self, "route_action", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. 
If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="urlRedirect") def url_redirect(self) -> Optional[pulumi.Input['HttpRedirectActionArgs']]: """ When a path pattern is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "url_redirect") @url_redirect.setter def url_redirect(self, value: Optional[pulumi.Input['HttpRedirectActionArgs']]): pulumi.set(self, "url_redirect", value) @pulumi.input_type class PublicDelegatedPrefixPublicDelegatedSubPrefixArgs: def __init__(__self__, *, delegatee_project: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, ip_cidr_range: Optional[pulumi.Input[str]] = None, is_address: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None): """ Represents a sub PublicDelegatedPrefix. :param pulumi.Input[str] delegatee_project: Name of the project scoping this PublicDelegatedSubPrefix. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] ip_cidr_range: The IPv4 address range, in CIDR format, represented by this sub public delegated prefix. :param pulumi.Input[bool] is_address: Whether the sub prefix is delegated to create Address resources in the delegatee project. :param pulumi.Input[str] name: The name of the sub public delegated prefix. """ if delegatee_project is not None: pulumi.set(__self__, "delegatee_project", delegatee_project) if description is not None: pulumi.set(__self__, "description", description) if ip_cidr_range is not None: pulumi.set(__self__, "ip_cidr_range", ip_cidr_range) if is_address is not None: pulumi.set(__self__, "is_address", is_address) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="delegateeProject") def delegatee_project(self) -> Optional[pulumi.Input[str]]: """ Name of the project scoping this PublicDelegatedSubPrefix. """ return pulumi.get(self, "delegatee_project") @delegatee_project.setter def delegatee_project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "delegatee_project", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="ipCidrRange") def ip_cidr_range(self) -> Optional[pulumi.Input[str]]: """ The IPv4 address range, in CIDR format, represented by this sub public delegated prefix. 
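
        Example (an illustrative sketch, not generated documentation; the name, project, and range are hypothetical)::

            sub_prefix = PublicDelegatedPrefixPublicDelegatedSubPrefixArgs(
                name="my-sub-prefix",
                delegatee_project="my-other-project",
                ip_cidr_range="203.0.113.0/28",  # documentation range, for illustration only
                is_address=True,  # let the delegatee project create Address resources from it
            )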
""" return pulumi.get(self, "ip_cidr_range") @ip_cidr_range.setter def ip_cidr_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_cidr_range", value) @property @pulumi.getter(name="isAddress") def is_address(self) -> Optional[pulumi.Input[bool]]: """ Whether the sub prefix is delegated to create Address resources in the delegatee project. """ return pulumi.get(self, "is_address") @is_address.setter def is_address(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "is_address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the sub public delegated prefix. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class RequestMirrorPolicyArgs: def __init__(__self__, *, backend_service: Optional[pulumi.Input[str]] = None): """ A policy that specifies how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer doesn't wait for responses from the shadow service. Before sending traffic to the shadow service, the host or authority header is suffixed with -shadow. :param pulumi.Input[str] backend_service: The full or partial URL to the BackendService resource being mirrored to. """ if backend_service is not None: pulumi.set(__self__, "backend_service", backend_service) @property @pulumi.getter(name="backendService") def backend_service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL to the BackendService resource being mirrored to. """ return pulumi.get(self, "backend_service") @backend_service.setter def backend_service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "backend_service", value) @pulumi.input_type class ReservationAffinityArgs: def __init__(__self__, *, consume_reservation_type: Optional[pulumi.Input['ReservationAffinityConsumeReservationType']] = None, key: Optional[pulumi.Input[str]] = None, values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Specifies the reservations that this instance can consume from. :param pulumi.Input['ReservationAffinityConsumeReservationType'] consume_reservation_type: Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved instances for examples. :param pulumi.Input[str] key: Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of a reservation resource. This can be either a name to a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project. """ if consume_reservation_type is not None: pulumi.set(__self__, "consume_reservation_type", consume_reservation_type) if key is not None: pulumi.set(__self__, "key", key) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter(name="consumeReservationType") def consume_reservation_type(self) -> Optional[pulumi.Input['ReservationAffinityConsumeReservationType']]: """ Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. 
See Consuming reserved instances for examples. """ return pulumi.get(self, "consume_reservation_type") @consume_reservation_type.setter def consume_reservation_type(self, value: Optional[pulumi.Input['ReservationAffinityConsumeReservationType']]): pulumi.set(self, "consume_reservation_type", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Corresponds to the label values of a reservation resource. This can be either a name to a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project. """ return pulumi.get(self, "values") @values.setter def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "values", value) @pulumi.input_type class ReservationArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, share_settings: Optional[pulumi.Input['ShareSettingsArgs']] = None, specific_reservation: Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']] = None, specific_reservation_required: Optional[pulumi.Input[bool]] = None, zone: Optional[pulumi.Input[str]] = None): """ Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] name: The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input['ShareSettingsArgs'] share_settings: Share-settings for shared-reservation :param pulumi.Input['AllocationSpecificSKUReservationArgs'] specific_reservation: Reservation for instances with specific machine shapes. :param pulumi.Input[bool] specific_reservation_required: Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. :param pulumi.Input[str] zone: Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment. 
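
        Illustrative sketch (name and zone are hypothetical; a specific_reservation value
        would be an AllocationSpecificSKUReservationArgs defined elsewhere in this module):

            reservation = ReservationArgs(
                name="my-reservation",               # must comply with RFC1035
                zone="us-central1-a",
                specific_reservation_required=True,  # only VMs targeting this reservation by name may consume it
            )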
""" if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if share_settings is not None: pulumi.set(__self__, "share_settings", share_settings) if specific_reservation is not None: pulumi.set(__self__, "specific_reservation", specific_reservation) if specific_reservation_required is not None: pulumi.set(__self__, "specific_reservation_required", specific_reservation_required) if zone is not None: pulumi.set(__self__, "zone", zone) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="shareSettings") def share_settings(self) -> Optional[pulumi.Input['ShareSettingsArgs']]: """ Share-settings for shared-reservation """ return pulumi.get(self, "share_settings") @share_settings.setter def share_settings(self, value: Optional[pulumi.Input['ShareSettingsArgs']]): pulumi.set(self, "share_settings", value) @property @pulumi.getter(name="specificReservation") def specific_reservation(self) -> Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']]: """ Reservation for instances with specific machine shapes. """ return pulumi.get(self, "specific_reservation") @specific_reservation.setter def specific_reservation(self, value: Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']]): pulumi.set(self, "specific_reservation", value) @property @pulumi.getter(name="specificReservationRequired") def specific_reservation_required(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. """ return pulumi.get(self, "specific_reservation_required") @specific_reservation_required.setter def specific_reservation_required(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "specific_reservation_required", value) @property @pulumi.getter def zone(self) -> Optional[pulumi.Input[str]]: """ Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment. """ return pulumi.get(self, "zone") @zone.setter def zone(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "zone", value) @pulumi.input_type class ResourceCommitmentArgs: def __init__(__self__, *, accelerator_type: Optional[pulumi.Input[str]] = None, amount: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input['ResourceCommitmentType']] = None): """ Commitment for a particular resource (a Commitment is composed of one or more of these). 
        :param pulumi.Input[str] accelerator_type: Name of the accelerator type resource. Applicable only when the type is ACCELERATOR.
        :param pulumi.Input[str] amount: The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU.
        :param pulumi.Input['ResourceCommitmentType'] type: Type of resource for which this commitment applies. Possible values are VCPU and MEMORY
        """
        if accelerator_type is not None:
            pulumi.set(__self__, "accelerator_type", accelerator_type)
        if amount is not None:
            pulumi.set(__self__, "amount", amount)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="acceleratorType")
    def accelerator_type(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the accelerator type resource. Applicable only when the type is ACCELERATOR.
        """
        return pulumi.get(self, "accelerator_type")

    @accelerator_type.setter
    def accelerator_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "accelerator_type", value)

    @property
    @pulumi.getter
    def amount(self) -> Optional[pulumi.Input[str]]:
        """
        The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU.
        """
        return pulumi.get(self, "amount")

    @amount.setter
    def amount(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "amount", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input['ResourceCommitmentType']]:
        """
        Type of resource for which this commitment applies. Possible values are VCPU and MEMORY
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input['ResourceCommitmentType']]):
        pulumi.set(self, "type", value)


@pulumi.input_type
class ResourcePolicyDailyCycleArgs:
    def __init__(__self__, *,
                 days_in_cycle: Optional[pulumi.Input[int]] = None,
                 start_time: Optional[pulumi.Input[str]] = None):
        """
        Time window specified for daily operations.
        :param pulumi.Input[int] days_in_cycle: Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle.
        :param pulumi.Input[str] start_time: Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
        """
        if days_in_cycle is not None:
            pulumi.set(__self__, "days_in_cycle", days_in_cycle)
        if start_time is not None:
            pulumi.set(__self__, "start_time", start_time)

    @property
    @pulumi.getter(name="daysInCycle")
    def days_in_cycle(self) -> Optional[pulumi.Input[int]]:
        """
        Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle.
        """
        return pulumi.get(self, "days_in_cycle")

    @days_in_cycle.setter
    def days_in_cycle(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "days_in_cycle", value)

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[pulumi.Input[str]]:
        """
        Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
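
        Illustrative sketch (values are hypothetical): a window that opens at 04:00 UTC
        every day:

            daily = ResourcePolicyDailyCycleArgs(days_in_cycle=1, start_time="04:00")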
""" return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class ResourcePolicyGroupPlacementPolicyArgs: def __init__(__self__, *, availability_domain_count: Optional[pulumi.Input[int]] = None, collocation: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation']] = None, locality: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality']] = None, scope: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyScope']] = None, style: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle']] = None, vm_count: Optional[pulumi.Input[int]] = None): """ A GroupPlacementPolicy specifies resource placement configuration. It specifies the failure bucket separation as well as network locality :param pulumi.Input[int] availability_domain_count: The number of availability domains instances will be spread across. If two instances are in different availability domain, they will not be put in the same low latency network :param pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation'] collocation: Specifies network collocation :param pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality'] locality: Specifies network locality :param pulumi.Input['ResourcePolicyGroupPlacementPolicyScope'] scope: Scope specifies the availability domain to which the VMs should be spread. :param pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle'] style: Specifies instances to hosts placement relationship :param pulumi.Input[int] vm_count: Number of vms in this placement group """ if availability_domain_count is not None: pulumi.set(__self__, "availability_domain_count", availability_domain_count) if collocation is not None: pulumi.set(__self__, "collocation", collocation) if locality is not None: pulumi.set(__self__, "locality", locality) if scope is not None: pulumi.set(__self__, "scope", scope) if style is not None: pulumi.set(__self__, "style", style) if vm_count is not None: pulumi.set(__self__, "vm_count", vm_count) @property @pulumi.getter(name="availabilityDomainCount") def availability_domain_count(self) -> Optional[pulumi.Input[int]]: """ The number of availability domains instances will be spread across. If two instances are in different availability domain, they will not be put in the same low latency network """ return pulumi.get(self, "availability_domain_count") @availability_domain_count.setter def availability_domain_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "availability_domain_count", value) @property @pulumi.getter def collocation(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation']]: """ Specifies network collocation """ return pulumi.get(self, "collocation") @collocation.setter def collocation(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation']]): pulumi.set(self, "collocation", value) @property @pulumi.getter def locality(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality']]: """ Specifies network locality """ return pulumi.get(self, "locality") @locality.setter def locality(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality']]): pulumi.set(self, "locality", value) @property @pulumi.getter def scope(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyScope']]: """ Scope specifies the availability domain to which the VMs should be spread. 
""" return pulumi.get(self, "scope") @scope.setter def scope(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyScope']]): pulumi.set(self, "scope", value) @property @pulumi.getter def style(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle']]: """ Specifies instances to hosts placement relationship """ return pulumi.get(self, "style") @style.setter def style(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle']]): pulumi.set(self, "style", value) @property @pulumi.getter(name="vmCount") def vm_count(self) -> Optional[pulumi.Input[int]]: """ Number of vms in this placement group """ return pulumi.get(self, "vm_count") @vm_count.setter def vm_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "vm_count", value) @pulumi.input_type class ResourcePolicyHourlyCycleArgs: def __init__(__self__, *, hours_in_cycle: Optional[pulumi.Input[int]] = None, start_time: Optional[pulumi.Input[str]] = None): """ Time window specified for hourly operations. :param pulumi.Input[int] hours_in_cycle: Defines a schedule with units measured in hours. The value determines how many hours pass between the start of each cycle. :param pulumi.Input[str] start_time: Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ if hours_in_cycle is not None: pulumi.set(__self__, "hours_in_cycle", hours_in_cycle) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter(name="hoursInCycle") def hours_in_cycle(self) -> Optional[pulumi.Input[int]]: """ Defines a schedule with units measured in hours. The value determines how many hours pass between the start of each cycle. """ return pulumi.get(self, "hours_in_cycle") @hours_in_cycle.setter def hours_in_cycle(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "hours_in_cycle", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class ResourcePolicyInstanceSchedulePolicyScheduleArgs: def __init__(__self__, *, schedule: Optional[pulumi.Input[str]] = None): """ Schedule for an instance operation. :param pulumi.Input[str] schedule: Specifies the frequency for the operation, using the unix-cron format. """ if schedule is not None: pulumi.set(__self__, "schedule", schedule) @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input[str]]: """ Specifies the frequency for the operation, using the unix-cron format. """ return pulumi.get(self, "schedule") @schedule.setter def schedule(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "schedule", value) @pulumi.input_type class ResourcePolicyInstanceSchedulePolicyArgs: def __init__(__self__, *, expiration_time: Optional[pulumi.Input[str]] = None, start_time: Optional[pulumi.Input[str]] = None, time_zone: Optional[pulumi.Input[str]] = None, vm_start_schedule: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']] = None, vm_stop_schedule: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']] = None): """ An InstanceSchedulePolicy specifies when and how frequent certain operations are performed on the instance. 
:param pulumi.Input[str] expiration_time: The expiration time of the schedule. The timestamp is an RFC3339 string. :param pulumi.Input[str] start_time: The start time of the schedule. The timestamp is an RFC3339 string. :param pulumi.Input[str] time_zone: Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. :param pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs'] vm_start_schedule: Specifies the schedule for starting instances. :param pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs'] vm_stop_schedule: Specifies the schedule for stopping instances. """ if expiration_time is not None: pulumi.set(__self__, "expiration_time", expiration_time) if start_time is not None: pulumi.set(__self__, "start_time", start_time) if time_zone is not None: pulumi.set(__self__, "time_zone", time_zone) if vm_start_schedule is not None: pulumi.set(__self__, "vm_start_schedule", vm_start_schedule) if vm_stop_schedule is not None: pulumi.set(__self__, "vm_stop_schedule", vm_stop_schedule) @property @pulumi.getter(name="expirationTime") def expiration_time(self) -> Optional[pulumi.Input[str]]: """ The expiration time of the schedule. The timestamp is an RFC3339 string. """ return pulumi.get(self, "expiration_time") @expiration_time.setter def expiration_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expiration_time", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ The start time of the schedule. The timestamp is an RFC3339 string. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @property @pulumi.getter(name="timeZone") def time_zone(self) -> Optional[pulumi.Input[str]]: """ Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. """ return pulumi.get(self, "time_zone") @time_zone.setter def time_zone(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "time_zone", value) @property @pulumi.getter(name="vmStartSchedule") def vm_start_schedule(self) -> Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]: """ Specifies the schedule for starting instances. """ return pulumi.get(self, "vm_start_schedule") @vm_start_schedule.setter def vm_start_schedule(self, value: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]): pulumi.set(self, "vm_start_schedule", value) @property @pulumi.getter(name="vmStopSchedule") def vm_stop_schedule(self) -> Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]: """ Specifies the schedule for stopping instances. 
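
        Illustrative sketch of the enclosing policy (cron expressions and time zone are
        hypothetical): start VMs at 08:00 and stop them at 18:00 on weekdays:

            schedule_policy = ResourcePolicyInstanceSchedulePolicyArgs(
                time_zone="Etc/UTC",
                vm_start_schedule=ResourcePolicyInstanceSchedulePolicyScheduleArgs(schedule="0 8 * * 1-5"),
                vm_stop_schedule=ResourcePolicyInstanceSchedulePolicyScheduleArgs(schedule="0 18 * * 1-5"),
            )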
""" return pulumi.get(self, "vm_stop_schedule") @vm_stop_schedule.setter def vm_stop_schedule(self, value: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]): pulumi.set(self, "vm_stop_schedule", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs: def __init__(__self__, *, max_retention_days: Optional[pulumi.Input[int]] = None, on_policy_switch: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnPolicySwitch']] = None, on_source_disk_delete: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete']] = None): """ Policy for retention of scheduled snapshots. :param pulumi.Input[int] max_retention_days: Maximum age of the snapshot that is allowed to be kept. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete'] on_source_disk_delete: Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. """ if max_retention_days is not None: pulumi.set(__self__, "max_retention_days", max_retention_days) if on_policy_switch is not None: pulumi.set(__self__, "on_policy_switch", on_policy_switch) if on_source_disk_delete is not None: pulumi.set(__self__, "on_source_disk_delete", on_source_disk_delete) @property @pulumi.getter(name="maxRetentionDays") def max_retention_days(self) -> Optional[pulumi.Input[int]]: """ Maximum age of the snapshot that is allowed to be kept. """ return pulumi.get(self, "max_retention_days") @max_retention_days.setter def max_retention_days(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_retention_days", value) @property @pulumi.getter(name="onPolicySwitch") def on_policy_switch(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnPolicySwitch']]: return pulumi.get(self, "on_policy_switch") @on_policy_switch.setter def on_policy_switch(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnPolicySwitch']]): pulumi.set(self, "on_policy_switch", value) @property @pulumi.getter(name="onSourceDiskDelete") def on_source_disk_delete(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete']]: """ Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. """ return pulumi.get(self, "on_source_disk_delete") @on_source_disk_delete.setter def on_source_disk_delete(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete']]): pulumi.set(self, "on_source_disk_delete", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicyScheduleArgs: def __init__(__self__, *, daily_schedule: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']] = None, hourly_schedule: Optional[pulumi.Input['ResourcePolicyHourlyCycleArgs']] = None, weekly_schedule: Optional[pulumi.Input['ResourcePolicyWeeklyCycleArgs']] = None): """ A schedule for disks where the schedueled operations are performed. 
""" if daily_schedule is not None: pulumi.set(__self__, "daily_schedule", daily_schedule) if hourly_schedule is not None: pulumi.set(__self__, "hourly_schedule", hourly_schedule) if weekly_schedule is not None: pulumi.set(__self__, "weekly_schedule", weekly_schedule) @property @pulumi.getter(name="dailySchedule") def daily_schedule(self) -> Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]: return pulumi.get(self, "daily_schedule") @daily_schedule.setter def daily_schedule(self, value: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]): pulumi.set(self, "daily_schedule", value) @property @pulumi.getter(name="hourlySchedule") def hourly_schedule(self) -> Optional[pulumi.Input['ResourcePolicyHourlyCycleArgs']]: return pulumi.get(self, "hourly_schedule") @hourly_schedule.setter def hourly_schedule(self, value: Optional[pulumi.Input['ResourcePolicyHourlyCycleArgs']]): pulumi.set(self, "hourly_schedule", value) @property @pulumi.getter(name="weeklySchedule") def weekly_schedule(self) -> Optional[pulumi.Input['ResourcePolicyWeeklyCycleArgs']]: return pulumi.get(self, "weekly_schedule") @weekly_schedule.setter def weekly_schedule(self, value: Optional[pulumi.Input['ResourcePolicyWeeklyCycleArgs']]): pulumi.set(self, "weekly_schedule", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs: def __init__(__self__, *, chain_name: Optional[pulumi.Input[str]] = None, guest_flush: Optional[pulumi.Input[bool]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, storage_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Specified snapshot properties for scheduled snapshots created by this policy. :param pulumi.Input[str] chain_name: Chain name that the snapshot is created in. :param pulumi.Input[bool] guest_flush: Indication to perform a 'guest aware' snapshot. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] storage_locations: Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). """ if chain_name is not None: pulumi.set(__self__, "chain_name", chain_name) if guest_flush is not None: pulumi.set(__self__, "guest_flush", guest_flush) if labels is not None: pulumi.set(__self__, "labels", labels) if storage_locations is not None: pulumi.set(__self__, "storage_locations", storage_locations) @property @pulumi.getter(name="chainName") def chain_name(self) -> Optional[pulumi.Input[str]]: """ Chain name that the snapshot is created in. """ return pulumi.get(self, "chain_name") @chain_name.setter def chain_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "chain_name", value) @property @pulumi.getter(name="guestFlush") def guest_flush(self) -> Optional[pulumi.Input[bool]]: """ Indication to perform a 'guest aware' snapshot. """ return pulumi.get(self, "guest_flush") @guest_flush.setter def guest_flush(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "guest_flush", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. 
""" return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter(name="storageLocations") def storage_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). """ return pulumi.get(self, "storage_locations") @storage_locations.setter def storage_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "storage_locations", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicyArgs: def __init__(__self__, *, retention_policy: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs']] = None, schedule: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs']] = None, snapshot_properties: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs']] = None): """ A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs'] retention_policy: Retention policy applied to snapshots created by this resource policy. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs'] schedule: A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs'] snapshot_properties: Properties with which snapshots are created such as labels, encryption keys. """ if retention_policy is not None: pulumi.set(__self__, "retention_policy", retention_policy) if schedule is not None: pulumi.set(__self__, "schedule", schedule) if snapshot_properties is not None: pulumi.set(__self__, "snapshot_properties", snapshot_properties) @property @pulumi.getter(name="retentionPolicy") def retention_policy(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs']]: """ Retention policy applied to snapshots created by this resource policy. """ return pulumi.get(self, "retention_policy") @retention_policy.setter def retention_policy(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs']]): pulumi.set(self, "retention_policy", value) @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs']]: """ A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy. """ return pulumi.get(self, "schedule") @schedule.setter def schedule(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs']]): pulumi.set(self, "schedule", value) @property @pulumi.getter(name="snapshotProperties") def snapshot_properties(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs']]: """ Properties with which snapshots are created such as labels, encryption keys. 
""" return pulumi.get(self, "snapshot_properties") @snapshot_properties.setter def snapshot_properties(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs']]): pulumi.set(self, "snapshot_properties", value) @pulumi.input_type class ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs: def __init__(__self__, *, concurrency_limit: Optional[pulumi.Input[int]] = None): """ A concurrency control configuration. Defines a group config that, when attached to an instance, recognizes that instance as part of a group of instances where only up the concurrency_limit of instances in that group can undergo simultaneous maintenance. For more information: go/concurrency-control-design-doc """ if concurrency_limit is not None: pulumi.set(__self__, "concurrency_limit", concurrency_limit) @property @pulumi.getter(name="concurrencyLimit") def concurrency_limit(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "concurrency_limit") @concurrency_limit.setter def concurrency_limit(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "concurrency_limit", value) @pulumi.input_type class ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs: def __init__(__self__, *, daily_maintenance_window: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']] = None): """ A maintenance window for VMs. When set, we restrict our maintenance operations to this window. """ if daily_maintenance_window is not None: pulumi.set(__self__, "daily_maintenance_window", daily_maintenance_window) @property @pulumi.getter(name="dailyMaintenanceWindow") def daily_maintenance_window(self) -> Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]: return pulumi.get(self, "daily_maintenance_window") @daily_maintenance_window.setter def daily_maintenance_window(self, value: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]): pulumi.set(self, "daily_maintenance_window", value) @pulumi.input_type class ResourcePolicyVmMaintenancePolicyArgs: def __init__(__self__, *, concurrency_control_group: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs']] = None, maintenance_window: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs']] = None): """ :param pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs'] maintenance_window: Maintenance windows that are applied to VMs covered by this policy. """ if concurrency_control_group is not None: pulumi.set(__self__, "concurrency_control_group", concurrency_control_group) if maintenance_window is not None: pulumi.set(__self__, "maintenance_window", maintenance_window) @property @pulumi.getter(name="concurrencyControlGroup") def concurrency_control_group(self) -> Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs']]: return pulumi.get(self, "concurrency_control_group") @concurrency_control_group.setter def concurrency_control_group(self, value: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs']]): pulumi.set(self, "concurrency_control_group", value) @property @pulumi.getter(name="maintenanceWindow") def maintenance_window(self) -> Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs']]: """ Maintenance windows that are applied to VMs covered by this policy. 
""" return pulumi.get(self, "maintenance_window") @maintenance_window.setter def maintenance_window(self, value: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs']]): pulumi.set(self, "maintenance_window", value) @pulumi.input_type class ResourcePolicyWeeklyCycleDayOfWeekArgs: def __init__(__self__, *, day: Optional[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay']] = None, start_time: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay'] day: Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. :param pulumi.Input[str] start_time: Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ if day is not None: pulumi.set(__self__, "day", day) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter def day(self) -> Optional[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay']]: """ Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. """ return pulumi.get(self, "day") @day.setter def day(self, value: Optional[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay']]): pulumi.set(self, "day", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class ResourcePolicyWeeklyCycleArgs: def __init__(__self__, *, day_of_weeks: Optional[pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]]] = None): """ Time window specified for weekly operations. :param pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]] day_of_weeks: Up to 7 intervals/windows, one for each day of the week. """ if day_of_weeks is not None: pulumi.set(__self__, "day_of_weeks", day_of_weeks) @property @pulumi.getter(name="dayOfWeeks") def day_of_weeks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]]]: """ Up to 7 intervals/windows, one for each day of the week. """ return pulumi.get(self, "day_of_weeks") @day_of_weeks.setter def day_of_weeks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]]]): pulumi.set(self, "day_of_weeks", value) @pulumi.input_type class RolloutPolicyArgs: def __init__(__self__, *, default_rollout_time: Optional[pulumi.Input[str]] = None, location_rollout_policies: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ A rollout policy configuration. :param pulumi.Input[str] default_rollout_time: An optional RFC3339 timestamp on or after which the update is considered rolled out to any zone that is not explicitly stated. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] location_rollout_policies: Location based rollout policies to apply to the resource. Currently only zone names are supported and must be represented as valid URLs, like: zones/us-central1-a. 
The value expects an RFC3339 timestamp on or after which the update is considered rolled out to the specified location. """ if default_rollout_time is not None: pulumi.set(__self__, "default_rollout_time", default_rollout_time) if location_rollout_policies is not None: pulumi.set(__self__, "location_rollout_policies", location_rollout_policies) @property @pulumi.getter(name="defaultRolloutTime") def default_rollout_time(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the update is considered rolled out to any zone that is not explicitly stated. """ return pulumi.get(self, "default_rollout_time") @default_rollout_time.setter def default_rollout_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_rollout_time", value) @property @pulumi.getter(name="locationRolloutPolicies") def location_rollout_policies(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Location based rollout policies to apply to the resource. Currently only zone names are supported and must be represented as valid URLs, like: zones/us-central1-a. The value expects an RFC3339 timestamp on or after which the update is considered rolled out to the specified location. """ return pulumi.get(self, "location_rollout_policies") @location_rollout_policies.setter def location_rollout_policies(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "location_rollout_policies", value) @pulumi.input_type class RouterAdvertisedIpRangeArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, range: Optional[pulumi.Input[str]] = None): """ Description-tagged IP ranges for the router to advertise. :param pulumi.Input[str] description: User-specified description for the IP range. :param pulumi.Input[str] range: The IP range to advertise. The value must be a CIDR-formatted string. """ if description is not None: pulumi.set(__self__, "description", description) if range is not None: pulumi.set(__self__, "range", range) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ User-specified description for the IP range. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def range(self) -> Optional[pulumi.Input[str]]: """ The IP range to advertise. The value must be a CIDR-formatted string. """ return pulumi.get(self, "range") @range.setter def range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range", value) @pulumi.input_type class RouterBgpPeerBfdArgs: def __init__(__self__, *, min_receive_interval: Optional[pulumi.Input[int]] = None, min_transmit_interval: Optional[pulumi.Input[int]] = None, mode: Optional[pulumi.Input['RouterBgpPeerBfdMode']] = None, multiplier: Optional[pulumi.Input[int]] = None, packet_mode: Optional[pulumi.Input['RouterBgpPeerBfdPacketMode']] = None, session_initialization_mode: Optional[pulumi.Input['RouterBgpPeerBfdSessionInitializationMode']] = None, slow_timer_interval: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] min_receive_interval: The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. 
:param pulumi.Input[int] min_transmit_interval: The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. :param pulumi.Input['RouterBgpPeerBfdMode'] mode: The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. :param pulumi.Input[int] multiplier: The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 5 and 16. The default is 5. :param pulumi.Input['RouterBgpPeerBfdPacketMode'] packet_mode: The BFD packet mode for this BGP peer. If set to CONTROL_AND_ECHO, BFD echo mode is enabled for this BGP peer. In this mode, if the peer router also has BFD echo mode enabled, BFD echo packets will be sent to the other router. If the peer router does not have BFD echo mode enabled, only control packets will be sent. If set to CONTROL_ONLY, BFD echo mode is disabled for this BGP peer. If this router and the peer router have a multihop connection, this should be set to CONTROL_ONLY as BFD echo mode is only supported on singlehop connections. The default is CONTROL_AND_ECHO. :param pulumi.Input['RouterBgpPeerBfdSessionInitializationMode'] session_initialization_mode: The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. :param pulumi.Input[int] slow_timer_interval: The minimum interval, in milliseconds, between BFD control packets transmitted to and received from the peer router when BFD echo mode is enabled on both routers. The actual transmit and receive intervals are negotiated between the two routers and are equal to the greater of this value and the corresponding interval on the other router. If set, this value must be between 1000 and 30000. The default is 5000. """ if min_receive_interval is not None: pulumi.set(__self__, "min_receive_interval", min_receive_interval) if min_transmit_interval is not None: pulumi.set(__self__, "min_transmit_interval", min_transmit_interval) if mode is not None: pulumi.set(__self__, "mode", mode) if multiplier is not None: pulumi.set(__self__, "multiplier", multiplier) if packet_mode is not None: pulumi.set(__self__, "packet_mode", packet_mode) if session_initialization_mode is not None: pulumi.set(__self__, "session_initialization_mode", session_initialization_mode) if slow_timer_interval is not None: pulumi.set(__self__, "slow_timer_interval", slow_timer_interval) @property @pulumi.getter(name="minReceiveInterval") def min_receive_interval(self) -> Optional[pulumi.Input[int]]: """ The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If set, this value must be between 1000 and 30000. 
The default is 1000. """ return pulumi.get(self, "min_receive_interval") @min_receive_interval.setter def min_receive_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_receive_interval", value) @property @pulumi.getter(name="minTransmitInterval") def min_transmit_interval(self) -> Optional[pulumi.Input[int]]: """ The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. """ return pulumi.get(self, "min_transmit_interval") @min_transmit_interval.setter def min_transmit_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_transmit_interval", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['RouterBgpPeerBfdMode']]: """ The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['RouterBgpPeerBfdMode']]): pulumi.set(self, "mode", value) @property @pulumi.getter def multiplier(self) -> Optional[pulumi.Input[int]]: """ The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 5 and 16. The default is 5. """ return pulumi.get(self, "multiplier") @multiplier.setter def multiplier(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "multiplier", value) @property @pulumi.getter(name="packetMode") def packet_mode(self) -> Optional[pulumi.Input['RouterBgpPeerBfdPacketMode']]: """ The BFD packet mode for this BGP peer. If set to CONTROL_AND_ECHO, BFD echo mode is enabled for this BGP peer. In this mode, if the peer router also has BFD echo mode enabled, BFD echo packets will be sent to the other router. If the peer router does not have BFD echo mode enabled, only control packets will be sent. If set to CONTROL_ONLY, BFD echo mode is disabled for this BGP peer. If this router and the peer router have a multihop connection, this should be set to CONTROL_ONLY as BFD echo mode is only supported on singlehop connections. The default is CONTROL_AND_ECHO. """ return pulumi.get(self, "packet_mode") @packet_mode.setter def packet_mode(self, value: Optional[pulumi.Input['RouterBgpPeerBfdPacketMode']]): pulumi.set(self, "packet_mode", value) @property @pulumi.getter(name="sessionInitializationMode") def session_initialization_mode(self) -> Optional[pulumi.Input['RouterBgpPeerBfdSessionInitializationMode']]: """ The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. 
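
        Illustrative sketch of the enclosing BFD configuration (intervals are the documented
        defaults; the ACTIVE member is assumed to exist on the enum, matching the values named
        above):

            bfd = RouterBgpPeerBfdArgs(
                min_receive_interval=1000,
                min_transmit_interval=1000,
                multiplier=5,
                session_initialization_mode=RouterBgpPeerBfdSessionInitializationMode.ACTIVE,  # assumed enum member
            )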
""" return pulumi.get(self, "session_initialization_mode") @session_initialization_mode.setter def session_initialization_mode(self, value: Optional[pulumi.Input['RouterBgpPeerBfdSessionInitializationMode']]): pulumi.set(self, "session_initialization_mode", value) @property @pulumi.getter(name="slowTimerInterval") def slow_timer_interval(self) -> Optional[pulumi.Input[int]]: """ The minimum interval, in milliseconds, between BFD control packets transmitted to and received from the peer router when BFD echo mode is enabled on both routers. The actual transmit and receive intervals are negotiated between the two routers and are equal to the greater of this value and the corresponding interval on the other router. If set, this value must be between 1000 and 30000. The default is 5000. """ return pulumi.get(self, "slow_timer_interval") @slow_timer_interval.setter def slow_timer_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "slow_timer_interval", value) @pulumi.input_type class RouterBgpPeerArgs: def __init__(__self__, *, advertise_mode: Optional[pulumi.Input['RouterBgpPeerAdvertiseMode']] = None, advertised_groups: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]]] = None, advertised_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]] = None, advertised_route_priority: Optional[pulumi.Input[int]] = None, bfd: Optional[pulumi.Input['RouterBgpPeerBfdArgs']] = None, enable: Optional[pulumi.Input['RouterBgpPeerEnable']] = None, enable_ipv6: Optional[pulumi.Input[bool]] = None, interface_name: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, ipv6_nexthop_address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, peer_asn: Optional[pulumi.Input[int]] = None, peer_ip_address: Optional[pulumi.Input[str]] = None, peer_ipv6_nexthop_address: Optional[pulumi.Input[str]] = None, router_appliance_instance: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['RouterBgpPeerAdvertiseMode'] advertise_mode: User-specified flag to indicate which mode to use for advertisement. :param pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]] advertised_groups: User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. :param pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]] advertised_ip_ranges: User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. :param pulumi.Input[int] advertised_route_priority: The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win. :param pulumi.Input['RouterBgpPeerBfdArgs'] bfd: BFD configuration for the BGP peering. 
:param pulumi.Input['RouterBgpPeerEnable'] enable: The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE. :param pulumi.Input[bool] enable_ipv6: Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. :param pulumi.Input[str] interface_name: Name of the interface the BGP peer is associated with. :param pulumi.Input[str] ip_address: IP address of the interface inside Google Cloud Platform. Only IPv4 is supported. :param pulumi.Input[str] ipv6_nexthop_address: IPv6 address of the interface inside Google Cloud Platform. :param pulumi.Input[str] name: Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[int] peer_asn: Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value. :param pulumi.Input[str] peer_ip_address: IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported. :param pulumi.Input[str] peer_ipv6_nexthop_address: IPv6 address of the BGP interface outside Google Cloud Platform. :param pulumi.Input[str] router_appliance_instance: URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. The VM instance must be located in zones contained in the same region as this Cloud Router. The VM instance is the peer side of the BGP session. """ if advertise_mode is not None: pulumi.set(__self__, "advertise_mode", advertise_mode) if advertised_groups is not None: pulumi.set(__self__, "advertised_groups", advertised_groups) if advertised_ip_ranges is not None: pulumi.set(__self__, "advertised_ip_ranges", advertised_ip_ranges) if advertised_route_priority is not None: pulumi.set(__self__, "advertised_route_priority", advertised_route_priority) if bfd is not None: pulumi.set(__self__, "bfd", bfd) if enable is not None: pulumi.set(__self__, "enable", enable) if enable_ipv6 is not None: pulumi.set(__self__, "enable_ipv6", enable_ipv6) if interface_name is not None: pulumi.set(__self__, "interface_name", interface_name) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) if ipv6_nexthop_address is not None: pulumi.set(__self__, "ipv6_nexthop_address", ipv6_nexthop_address) if name is not None: pulumi.set(__self__, "name", name) if peer_asn is not None: pulumi.set(__self__, "peer_asn", peer_asn) if peer_ip_address is not None: pulumi.set(__self__, "peer_ip_address", peer_ip_address) if peer_ipv6_nexthop_address is not None: pulumi.set(__self__, "peer_ipv6_nexthop_address", peer_ipv6_nexthop_address) if router_appliance_instance is not None: pulumi.set(__self__, "router_appliance_instance", router_appliance_instance) @property @pulumi.getter(name="advertiseMode") def advertise_mode(self) -> Optional[pulumi.Input['RouterBgpPeerAdvertiseMode']]: """ User-specified flag to indicate which mode to use for advertisement. 
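
        Illustrative sketch of the enclosing peer (addresses and ASN are hypothetical; Cloud
        Router BGP sessions commonly use link-local addressing):

            peer = RouterBgpPeerArgs(
                name="my-bgp-peer",                # must comply with RFC1035
                interface_name="my-bgp-interface",
                ip_address="169.254.0.1",          # Google Cloud side of the session
                peer_ip_address="169.254.0.2",     # remote side of the session
                peer_asn=64512,                    # private ASN
            )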
""" return pulumi.get(self, "advertise_mode") @advertise_mode.setter def advertise_mode(self, value: Optional[pulumi.Input['RouterBgpPeerAdvertiseMode']]): pulumi.set(self, "advertise_mode", value) @property @pulumi.getter(name="advertisedGroups") def advertised_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]]]: """ User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. """ return pulumi.get(self, "advertised_groups") @advertised_groups.setter def advertised_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]]]): pulumi.set(self, "advertised_groups", value) @property @pulumi.getter(name="advertisedIpRanges") def advertised_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]: """ User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. """ return pulumi.get(self, "advertised_ip_ranges") @advertised_ip_ranges.setter def advertised_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]): pulumi.set(self, "advertised_ip_ranges", value) @property @pulumi.getter(name="advertisedRoutePriority") def advertised_route_priority(self) -> Optional[pulumi.Input[int]]: """ The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win. """ return pulumi.get(self, "advertised_route_priority") @advertised_route_priority.setter def advertised_route_priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "advertised_route_priority", value) @property @pulumi.getter def bfd(self) -> Optional[pulumi.Input['RouterBgpPeerBfdArgs']]: """ BFD configuration for the BGP peering. """ return pulumi.get(self, "bfd") @bfd.setter def bfd(self, value: Optional[pulumi.Input['RouterBgpPeerBfdArgs']]): pulumi.set(self, "bfd", value) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input['RouterBgpPeerEnable']]: """ The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input['RouterBgpPeerEnable']]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="enableIpv6") def enable_ipv6(self) -> Optional[pulumi.Input[bool]]: """ Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. 
""" return pulumi.get(self, "enable_ipv6") @enable_ipv6.setter def enable_ipv6(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_ipv6", value) @property @pulumi.getter(name="interfaceName") def interface_name(self) -> Optional[pulumi.Input[str]]: """ Name of the interface the BGP peer is associated with. """ return pulumi.get(self, "interface_name") @interface_name.setter def interface_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "interface_name", value) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[pulumi.Input[str]]: """ IP address of the interface inside Google Cloud Platform. Only IPv4 is supported. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_address", value) @property @pulumi.getter(name="ipv6NexthopAddress") def ipv6_nexthop_address(self) -> Optional[pulumi.Input[str]]: """ IPv6 address of the interface inside Google Cloud Platform. """ return pulumi.get(self, "ipv6_nexthop_address") @ipv6_nexthop_address.setter def ipv6_nexthop_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ipv6_nexthop_address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="peerAsn") def peer_asn(self) -> Optional[pulumi.Input[int]]: """ Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value. """ return pulumi.get(self, "peer_asn") @peer_asn.setter def peer_asn(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "peer_asn", value) @property @pulumi.getter(name="peerIpAddress") def peer_ip_address(self) -> Optional[pulumi.Input[str]]: """ IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported. """ return pulumi.get(self, "peer_ip_address") @peer_ip_address.setter def peer_ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "peer_ip_address", value) @property @pulumi.getter(name="peerIpv6NexthopAddress") def peer_ipv6_nexthop_address(self) -> Optional[pulumi.Input[str]]: """ IPv6 address of the BGP interface outside Google Cloud Platform. """ return pulumi.get(self, "peer_ipv6_nexthop_address") @peer_ipv6_nexthop_address.setter def peer_ipv6_nexthop_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "peer_ipv6_nexthop_address", value) @property @pulumi.getter(name="routerApplianceInstance") def router_appliance_instance(self) -> Optional[pulumi.Input[str]]: """ URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. The VM instance must be located in zones contained in the same region as this Cloud Router. The VM instance is the peer side of the BGP session. 
""" return pulumi.get(self, "router_appliance_instance") @router_appliance_instance.setter def router_appliance_instance(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "router_appliance_instance", value) @pulumi.input_type class RouterBgpArgs: def __init__(__self__, *, advertise_mode: Optional[pulumi.Input['RouterBgpAdvertiseMode']] = None, advertised_groups: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]]] = None, advertised_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]] = None, asn: Optional[pulumi.Input[int]] = None, keepalive_interval: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input['RouterBgpAdvertiseMode'] advertise_mode: User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM. :param pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]] advertised_groups: User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. :param pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]] advertised_ip_ranges: User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. :param pulumi.Input[int] asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN. :param pulumi.Input[int] keepalive_interval: The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers. If set, this value must be between 20 and 60. The default is 20. """ if advertise_mode is not None: pulumi.set(__self__, "advertise_mode", advertise_mode) if advertised_groups is not None: pulumi.set(__self__, "advertised_groups", advertised_groups) if advertised_ip_ranges is not None: pulumi.set(__self__, "advertised_ip_ranges", advertised_ip_ranges) if asn is not None: pulumi.set(__self__, "asn", asn) if keepalive_interval is not None: pulumi.set(__self__, "keepalive_interval", keepalive_interval) @property @pulumi.getter(name="advertiseMode") def advertise_mode(self) -> Optional[pulumi.Input['RouterBgpAdvertiseMode']]: """ User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM. """ return pulumi.get(self, "advertise_mode") @advertise_mode.setter def advertise_mode(self, value: Optional[pulumi.Input['RouterBgpAdvertiseMode']]): pulumi.set(self, "advertise_mode", value) @property @pulumi.getter(name="advertisedGroups") def advertised_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]]]: """ User-specified list of prefix groups to advertise in custom mode. 
This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. """ return pulumi.get(self, "advertised_groups") @advertised_groups.setter def advertised_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]]]): pulumi.set(self, "advertised_groups", value) @property @pulumi.getter(name="advertisedIpRanges") def advertised_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]: """ User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. """ return pulumi.get(self, "advertised_ip_ranges") @advertised_ip_ranges.setter def advertised_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]): pulumi.set(self, "advertised_ip_ranges", value) @property @pulumi.getter def asn(self) -> Optional[pulumi.Input[int]]: """ Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN. """ return pulumi.get(self, "asn") @asn.setter def asn(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "asn", value) @property @pulumi.getter(name="keepaliveInterval") def keepalive_interval(self) -> Optional[pulumi.Input[int]]: """ The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers. If set, this value must be between 20 and 60. The default is 20. """ return pulumi.get(self, "keepalive_interval") @keepalive_interval.setter def keepalive_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "keepalive_interval", value) @pulumi.input_type class RouterInterfaceArgs: def __init__(__self__, *, ip_range: Optional[pulumi.Input[str]] = None, linked_interconnect_attachment: Optional[pulumi.Input[str]] = None, linked_vpn_tunnel: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, private_ip_address: Optional[pulumi.Input[str]] = None, redundant_interface: Optional[pulumi.Input[str]] = None, subnetwork: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] ip_range: IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface. :param pulumi.Input[str] linked_interconnect_attachment: URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. 
:param pulumi.Input[str] linked_vpn_tunnel: URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. :param pulumi.Input[str] name: Name of this interface entry. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] private_ip_address: The regional private internal IP address that is used to establish BGP sessions to a VM instance acting as a third-party Router Appliance, such as a Next Gen Firewall, a Virtual Router, or an SD-WAN VM. :param pulumi.Input[str] redundant_interface: Name of the interface that will be redundant with the current interface you are creating. The redundantInterface must belong to the same Cloud Router as the interface here. To establish the BGP session to a Router Appliance VM, you must create two BGP peers. The two BGP peers must be attached to two separate interfaces that are redundant with each other. The redundant_interface must be 1-63 characters long, and comply with RFC1035. Specifically, the redundant_interface must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] subnetwork: The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. When you establish a BGP session to a VM instance using this interface, the VM instance must belong to the same subnetwork as the subnetwork specified here. """ if ip_range is not None: pulumi.set(__self__, "ip_range", ip_range) if linked_interconnect_attachment is not None: pulumi.set(__self__, "linked_interconnect_attachment", linked_interconnect_attachment) if linked_vpn_tunnel is not None: pulumi.set(__self__, "linked_vpn_tunnel", linked_vpn_tunnel) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if redundant_interface is not None: pulumi.set(__self__, "redundant_interface", redundant_interface) if subnetwork is not None: pulumi.set(__self__, "subnetwork", subnetwork) @property @pulumi.getter(name="ipRange") def ip_range(self) -> Optional[pulumi.Input[str]]: """ IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface. """ return pulumi.get(self, "ip_range") @ip_range.setter def ip_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_range", value) @property @pulumi.getter(name="linkedInterconnectAttachment") def linked_interconnect_attachment(self) -> Optional[pulumi.Input[str]]: """ URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. 
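
        Example (illustrative sketch; the attachment URL and the RFC3927 link-local
        range are placeholders consistent with the field description above):

            interface = RouterInterfaceArgs(
                name="if-attachment-0",
                ip_range="169.254.10.1/30",
                linked_interconnect_attachment=(
                    "https://www.googleapis.com/compute/v1/projects/my-project"
                    "/regions/us-central1/interconnectAttachments/my-attachment"
                ),
            )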
""" return pulumi.get(self, "linked_interconnect_attachment") @linked_interconnect_attachment.setter def linked_interconnect_attachment(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "linked_interconnect_attachment", value) @property @pulumi.getter(name="linkedVpnTunnel") def linked_vpn_tunnel(self) -> Optional[pulumi.Input[str]]: """ URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. """ return pulumi.get(self, "linked_vpn_tunnel") @linked_vpn_tunnel.setter def linked_vpn_tunnel(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "linked_vpn_tunnel", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of this interface entry. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="privateIpAddress") def private_ip_address(self) -> Optional[pulumi.Input[str]]: """ The regional private internal IP address that is used to establish BGP sessions to a VM instance acting as a third-party Router Appliance, such as a Next Gen Firewall, a Virtual Router, or an SD-WAN VM. """ return pulumi.get(self, "private_ip_address") @private_ip_address.setter def private_ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_ip_address", value) @property @pulumi.getter(name="redundantInterface") def redundant_interface(self) -> Optional[pulumi.Input[str]]: """ Name of the interface that will be redundant with the current interface you are creating. The redundantInterface must belong to the same Cloud Router as the interface here. To establish the BGP session to a Router Appliance VM, you must create two BGP peers. The two BGP peers must be attached to two separate interfaces that are redundant with each other. The redundant_interface must be 1-63 characters long, and comply with RFC1035. Specifically, the redundant_interface must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "redundant_interface") @redundant_interface.setter def redundant_interface(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "redundant_interface", value) @property @pulumi.getter def subnetwork(self) -> Optional[pulumi.Input[str]]: """ The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. When you establish a BGP session to a VM instance using this interface, the VM instance must belong to the same subnetwork as the subnetwork specified here. 
""" return pulumi.get(self, "subnetwork") @subnetwork.setter def subnetwork(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork", value) @pulumi.input_type class RouterNatLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, filter: Optional[pulumi.Input['RouterNatLogConfigFilter']] = None): """ Configuration of logging on a NAT. :param pulumi.Input[bool] enable: Indicates whether or not to export logs. This is false by default. :param pulumi.Input['RouterNatLogConfigFilter'] filter: Specify the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT. This option can take one of the following values: - ERRORS_ONLY: Export logs only for connection failures. - TRANSLATIONS_ONLY: Export logs only for successful connections. - ALL: Export logs for all connections, successful and unsuccessful. """ if enable is not None: pulumi.set(__self__, "enable", enable) if filter is not None: pulumi.set(__self__, "filter", filter) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether or not to export logs. This is false by default. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter def filter(self) -> Optional[pulumi.Input['RouterNatLogConfigFilter']]: """ Specify the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT. This option can take one of the following values: - ERRORS_ONLY: Export logs only for connection failures. - TRANSLATIONS_ONLY: Export logs only for successful connections. - ALL: Export logs for all connections, successful and unsuccessful. """ return pulumi.get(self, "filter") @filter.setter def filter(self, value: Optional[pulumi.Input['RouterNatLogConfigFilter']]): pulumi.set(self, "filter", value) @pulumi.input_type class RouterNatRuleActionArgs: def __init__(__self__, *, source_nat_active_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_nat_active_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_nat_drain_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_nat_drain_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_active_ips: A list of URLs of the IP resources used for this NAT rule. These IP addresses must be valid static external IP addresses assigned to the project. This field is used for public NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_active_ranges: A list of URLs of the subnetworks used as source ranges for this NAT Rule. These subnetworks must have purpose set to PRIVATE_NAT. This field is used for private NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_drain_ips: A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_drain_ranges: A list of URLs of subnetworks representing source ranges to be drained. This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. This field is used for private NAT. 
""" if source_nat_active_ips is not None: pulumi.set(__self__, "source_nat_active_ips", source_nat_active_ips) if source_nat_active_ranges is not None: pulumi.set(__self__, "source_nat_active_ranges", source_nat_active_ranges) if source_nat_drain_ips is not None: pulumi.set(__self__, "source_nat_drain_ips", source_nat_drain_ips) if source_nat_drain_ranges is not None: pulumi.set(__self__, "source_nat_drain_ranges", source_nat_drain_ranges) @property @pulumi.getter(name="sourceNatActiveIps") def source_nat_active_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources used for this NAT rule. These IP addresses must be valid static external IP addresses assigned to the project. This field is used for public NAT. """ return pulumi.get(self, "source_nat_active_ips") @source_nat_active_ips.setter def source_nat_active_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_active_ips", value) @property @pulumi.getter(name="sourceNatActiveRanges") def source_nat_active_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the subnetworks used as source ranges for this NAT Rule. These subnetworks must have purpose set to PRIVATE_NAT. This field is used for private NAT. """ return pulumi.get(self, "source_nat_active_ranges") @source_nat_active_ranges.setter def source_nat_active_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_active_ranges", value) @property @pulumi.getter(name="sourceNatDrainIps") def source_nat_drain_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. """ return pulumi.get(self, "source_nat_drain_ips") @source_nat_drain_ips.setter def source_nat_drain_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_drain_ips", value) @property @pulumi.getter(name="sourceNatDrainRanges") def source_nat_drain_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of subnetworks representing source ranges to be drained. This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. This field is used for private NAT. """ return pulumi.get(self, "source_nat_drain_ranges") @source_nat_drain_ranges.setter def source_nat_drain_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_drain_ranges", value) @pulumi.input_type class RouterNatRuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input['RouterNatRuleActionArgs']] = None, description: Optional[pulumi.Input[str]] = None, match: Optional[pulumi.Input[str]] = None, rule_number: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input['RouterNatRuleActionArgs'] action: The action to be enforced for traffic that matches this rule. :param pulumi.Input[str] description: An optional description of this rule. :param pulumi.Input[str] match: CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. 
The following examples are valid match expressions for public NAT: "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The following example is a valid match expression for private NAT: "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" :param pulumi.Input[int] rule_number: An integer uniquely identifying a rule in the list. The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. """ if action is not None: pulumi.set(__self__, "action", action) if description is not None: pulumi.set(__self__, "description", description) if match is not None: pulumi.set(__self__, "match", match) if rule_number is not None: pulumi.set(__self__, "rule_number", rule_number) @property @pulumi.getter def action(self) -> Optional[pulumi.Input['RouterNatRuleActionArgs']]: """ The action to be enforced for traffic that matches this rule. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input['RouterNatRuleActionArgs']]): pulumi.set(self, "action", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this rule. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def match(self) -> Optional[pulumi.Input[str]]: """ CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. The following examples are valid match expressions for public NAT: "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The following example is a valid match expression for private NAT: "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" """ return pulumi.get(self, "match") @match.setter def match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "match", value) @property @pulumi.getter(name="ruleNumber") def rule_number(self) -> Optional[pulumi.Input[int]]: """ An integer uniquely identifying a rule in the list. The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. """ return pulumi.get(self, "rule_number") @rule_number.setter def rule_number(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rule_number", value) @pulumi.input_type class RouterNatSubnetworkToNatArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, secondary_ip_range_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_ip_ranges_to_nat: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]]] = None): """ Defines the IP ranges that want to use NAT for a subnetwork. :param pulumi.Input[str] name: URL for the subnetwork resource that will use NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] secondary_ip_range_names: A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if "LIST_OF_SECONDARY_IP_RANGES" is one of the values in source_ip_ranges_to_nat. 
:param pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]] source_ip_ranges_to_nat: Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] Default: [ALL_IP_RANGES] """ if name is not None: pulumi.set(__self__, "name", name) if secondary_ip_range_names is not None: pulumi.set(__self__, "secondary_ip_range_names", secondary_ip_range_names) if source_ip_ranges_to_nat is not None: pulumi.set(__self__, "source_ip_ranges_to_nat", source_ip_ranges_to_nat) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ URL for the subnetwork resource that will use NAT. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="secondaryIpRangeNames") def secondary_ip_range_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if "LIST_OF_SECONDARY_IP_RANGES" is one of the values in source_ip_ranges_to_nat. """ return pulumi.get(self, "secondary_ip_range_names") @secondary_ip_range_names.setter def secondary_ip_range_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "secondary_ip_range_names", value) @property @pulumi.getter(name="sourceIpRangesToNat") def source_ip_ranges_to_nat(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]]]: """ Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. 
The only valid option with multiple values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] Default: [ALL_IP_RANGES] """ return pulumi.get(self, "source_ip_ranges_to_nat") @source_ip_ranges_to_nat.setter def source_ip_ranges_to_nat(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]]]): pulumi.set(self, "source_ip_ranges_to_nat", value) @pulumi.input_type class RouterNatArgs: def __init__(__self__, *, drain_nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, enable_dynamic_port_allocation: Optional[pulumi.Input[bool]] = None, enable_endpoint_independent_mapping: Optional[pulumi.Input[bool]] = None, icmp_idle_timeout_sec: Optional[pulumi.Input[int]] = None, log_config: Optional[pulumi.Input['RouterNatLogConfigArgs']] = None, max_ports_per_vm: Optional[pulumi.Input[int]] = None, min_ports_per_vm: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, nat_ip_allocate_option: Optional[pulumi.Input['RouterNatNatIpAllocateOption']] = None, nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]] = None, source_subnetwork_ip_ranges_to_nat: Optional[pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat']] = None, subnetworks: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]]] = None, tcp_established_idle_timeout_sec: Optional[pulumi.Input[int]] = None, tcp_time_wait_timeout_sec: Optional[pulumi.Input[int]] = None, tcp_transitory_idle_timeout_sec: Optional[pulumi.Input[int]] = None, type: Optional[pulumi.Input['RouterNatType']] = None, udp_idle_timeout_sec: Optional[pulumi.Input[int]] = None): """ Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided. :param pulumi.Input[Sequence[pulumi.Input[str]]] drain_nat_ips: A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only. :param pulumi.Input[bool] enable_dynamic_port_allocation: Enable Dynamic Port Allocation. If not specified, it is disabled by default. If set to true, - Dynamic Port Allocation will be enabled on this NAT config. - enableEndpointIndependentMapping cannot be set to true. - If minPorts is set, minPortsPerVm must be set to a power of two greater than or equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will be allocated to a VM from this NAT config. :param pulumi.Input[int] icmp_idle_timeout_sec: Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. :param pulumi.Input['RouterNatLogConfigArgs'] log_config: Configure logging on this NAT. :param pulumi.Input[int] max_ports_per_vm: Maximum number of ports allocated to a VM from this NAT config when Dynamic Port Allocation is enabled. If Dynamic Port Allocation is not enabled, this field has no effect. If Dynamic Port Allocation is enabled, and this field is set, it must be set to a power of two greater than minPortsPerVm, or 64 if minPortsPerVm is not set. If Dynamic Port Allocation is enabled and this field is not set, a maximum of 65536 ports will be allocated to a VM from this NAT config. 
:param pulumi.Input[int] min_ports_per_vm: Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM. :param pulumi.Input[str] name: Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035. :param pulumi.Input['RouterNatNatIpAllocateOption'] nat_ip_allocate_option: Specify the NatIpAllocateOption, which can take one of the following values: - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] nat_ips: A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project. :param pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]] rules: A list of rules associated with this NAT. :param pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat'] source_subnetwork_ip_ranges_to_nat: Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region. :param pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]] subnetworks: A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above. :param pulumi.Input[int] tcp_established_idle_timeout_sec: Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set. :param pulumi.Input[int] tcp_time_wait_timeout_sec: Timeout (in seconds) for TCP connections that are in TIME_WAIT state. Defaults to 120s if not set. :param pulumi.Input[int] tcp_transitory_idle_timeout_sec: Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set. :param pulumi.Input['RouterNatType'] type: Indicates whether this NAT is used for public or private IP translation. If unspecified, it defaults to PUBLIC. :param pulumi.Input[int] udp_idle_timeout_sec: Timeout (in seconds) for UDP connections. Defaults to 30s if not set. 
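
        Example (illustrative sketch; the enum members shown are assumed from the
        imported ``RouterNatNatIpAllocateOption``,
        ``RouterNatSourceSubnetworkIpRangesToNat``, and ``RouterNatLogConfigFilter``
        types, and match the values listed above):

            nat = RouterNatArgs(
                name="my-nat",
                nat_ip_allocate_option=RouterNatNatIpAllocateOption.AUTO_ONLY,
                source_subnetwork_ip_ranges_to_nat=(
                    RouterNatSourceSubnetworkIpRangesToNat.ALL_SUBNETWORKS_ALL_IP_RANGES
                ),
                log_config=RouterNatLogConfigArgs(
                    enable=True,
                    filter=RouterNatLogConfigFilter.ERRORS_ONLY,
                ),
            )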
""" if drain_nat_ips is not None: pulumi.set(__self__, "drain_nat_ips", drain_nat_ips) if enable_dynamic_port_allocation is not None: pulumi.set(__self__, "enable_dynamic_port_allocation", enable_dynamic_port_allocation) if enable_endpoint_independent_mapping is not None: pulumi.set(__self__, "enable_endpoint_independent_mapping", enable_endpoint_independent_mapping) if icmp_idle_timeout_sec is not None: pulumi.set(__self__, "icmp_idle_timeout_sec", icmp_idle_timeout_sec) if log_config is not None: pulumi.set(__self__, "log_config", log_config) if max_ports_per_vm is not None: pulumi.set(__self__, "max_ports_per_vm", max_ports_per_vm) if min_ports_per_vm is not None: pulumi.set(__self__, "min_ports_per_vm", min_ports_per_vm) if name is not None: pulumi.set(__self__, "name", name) if nat_ip_allocate_option is not None: pulumi.set(__self__, "nat_ip_allocate_option", nat_ip_allocate_option) if nat_ips is not None: pulumi.set(__self__, "nat_ips", nat_ips) if rules is not None: pulumi.set(__self__, "rules", rules) if source_subnetwork_ip_ranges_to_nat is not None: pulumi.set(__self__, "source_subnetwork_ip_ranges_to_nat", source_subnetwork_ip_ranges_to_nat) if subnetworks is not None: pulumi.set(__self__, "subnetworks", subnetworks) if tcp_established_idle_timeout_sec is not None: pulumi.set(__self__, "tcp_established_idle_timeout_sec", tcp_established_idle_timeout_sec) if tcp_time_wait_timeout_sec is not None: pulumi.set(__self__, "tcp_time_wait_timeout_sec", tcp_time_wait_timeout_sec) if tcp_transitory_idle_timeout_sec is not None: pulumi.set(__self__, "tcp_transitory_idle_timeout_sec", tcp_transitory_idle_timeout_sec) if type is not None: pulumi.set(__self__, "type", type) if udp_idle_timeout_sec is not None: pulumi.set(__self__, "udp_idle_timeout_sec", udp_idle_timeout_sec) @property @pulumi.getter(name="drainNatIps") def drain_nat_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only. """ return pulumi.get(self, "drain_nat_ips") @drain_nat_ips.setter def drain_nat_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "drain_nat_ips", value) @property @pulumi.getter(name="enableDynamicPortAllocation") def enable_dynamic_port_allocation(self) -> Optional[pulumi.Input[bool]]: """ Enable Dynamic Port Allocation. If not specified, it is disabled by default. If set to true, - Dynamic Port Allocation will be enabled on this NAT config. - enableEndpointIndependentMapping cannot be set to true. - If minPorts is set, minPortsPerVm must be set to a power of two greater than or equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will be allocated to a VM from this NAT config. 
""" return pulumi.get(self, "enable_dynamic_port_allocation") @enable_dynamic_port_allocation.setter def enable_dynamic_port_allocation(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_dynamic_port_allocation", value) @property @pulumi.getter(name="enableEndpointIndependentMapping") def enable_endpoint_independent_mapping(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_endpoint_independent_mapping") @enable_endpoint_independent_mapping.setter def enable_endpoint_independent_mapping(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_endpoint_independent_mapping", value) @property @pulumi.getter(name="icmpIdleTimeoutSec") def icmp_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. """ return pulumi.get(self, "icmp_idle_timeout_sec") @icmp_idle_timeout_sec.setter def icmp_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "icmp_idle_timeout_sec", value) @property @pulumi.getter(name="logConfig") def log_config(self) -> Optional[pulumi.Input['RouterNatLogConfigArgs']]: """ Configure logging on this NAT. """ return pulumi.get(self, "log_config") @log_config.setter def log_config(self, value: Optional[pulumi.Input['RouterNatLogConfigArgs']]): pulumi.set(self, "log_config", value) @property @pulumi.getter(name="maxPortsPerVm") def max_ports_per_vm(self) -> Optional[pulumi.Input[int]]: """ Maximum number of ports allocated to a VM from this NAT config when Dynamic Port Allocation is enabled. If Dynamic Port Allocation is not enabled, this field has no effect. If Dynamic Port Allocation is enabled, and this field is set, it must be set to a power of two greater than minPortsPerVm, or 64 if minPortsPerVm is not set. If Dynamic Port Allocation is enabled and this field is not set, a maximum of 65536 ports will be allocated to a VM from this NAT config. """ return pulumi.get(self, "max_ports_per_vm") @max_ports_per_vm.setter def max_ports_per_vm(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ports_per_vm", value) @property @pulumi.getter(name="minPortsPerVm") def min_ports_per_vm(self) -> Optional[pulumi.Input[int]]: """ Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM. """ return pulumi.get(self, "min_ports_per_vm") @min_ports_per_vm.setter def min_ports_per_vm(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_ports_per_vm", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="natIpAllocateOption") def nat_ip_allocate_option(self) -> Optional[pulumi.Input['RouterNatNatIpAllocateOption']]: """ Specify the NatIpAllocateOption, which can take one of the following values: - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty. 
""" return pulumi.get(self, "nat_ip_allocate_option") @nat_ip_allocate_option.setter def nat_ip_allocate_option(self, value: Optional[pulumi.Input['RouterNatNatIpAllocateOption']]): pulumi.set(self, "nat_ip_allocate_option", value) @property @pulumi.getter(name="natIps") def nat_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project. """ return pulumi.get(self, "nat_ips") @nat_ips.setter def nat_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "nat_ips", value) @property @pulumi.getter def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]]: """ A list of rules associated with this NAT. """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]]): pulumi.set(self, "rules", value) @property @pulumi.getter(name="sourceSubnetworkIpRangesToNat") def source_subnetwork_ip_ranges_to_nat(self) -> Optional[pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat']]: """ Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region. """ return pulumi.get(self, "source_subnetwork_ip_ranges_to_nat") @source_subnetwork_ip_ranges_to_nat.setter def source_subnetwork_ip_ranges_to_nat(self, value: Optional[pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat']]): pulumi.set(self, "source_subnetwork_ip_ranges_to_nat", value) @property @pulumi.getter def subnetworks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]]]: """ A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above. """ return pulumi.get(self, "subnetworks") @subnetworks.setter def subnetworks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]]]): pulumi.set(self, "subnetworks", value) @property @pulumi.getter(name="tcpEstablishedIdleTimeoutSec") def tcp_established_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set. """ return pulumi.get(self, "tcp_established_idle_timeout_sec") @tcp_established_idle_timeout_sec.setter def tcp_established_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "tcp_established_idle_timeout_sec", value) @property @pulumi.getter(name="tcpTimeWaitTimeoutSec") def tcp_time_wait_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for TCP connections that are in TIME_WAIT state. Defaults to 120s if not set. 
""" return pulumi.get(self, "tcp_time_wait_timeout_sec") @tcp_time_wait_timeout_sec.setter def tcp_time_wait_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "tcp_time_wait_timeout_sec", value) @property @pulumi.getter(name="tcpTransitoryIdleTimeoutSec") def tcp_transitory_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set. """ return pulumi.get(self, "tcp_transitory_idle_timeout_sec") @tcp_transitory_idle_timeout_sec.setter def tcp_transitory_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "tcp_transitory_idle_timeout_sec", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['RouterNatType']]: """ Indicates whether this NAT is used for public or private IP translation. If unspecified, it defaults to PUBLIC. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['RouterNatType']]): pulumi.set(self, "type", value) @property @pulumi.getter(name="udpIdleTimeoutSec") def udp_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for UDP connections. Defaults to 30s if not set. """ return pulumi.get(self, "udp_idle_timeout_sec") @udp_idle_timeout_sec.setter def udp_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "udp_idle_timeout_sec", value) @pulumi.input_type class RuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input['RuleAction']] = None, conditions: Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]] = None, description: Optional[pulumi.Input[str]] = None, ins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, log_configs: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]]] = None, not_ins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['RuleAction'] action: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]] conditions: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] description: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] ins: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]] log_configs: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] not_ins: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: This is deprecated and has no effect. Do not use. """ if action is not None: pulumi.set(__self__, "action", action) if conditions is not None: pulumi.set(__self__, "conditions", conditions) if description is not None: pulumi.set(__self__, "description", description) if ins is not None: pulumi.set(__self__, "ins", ins) if log_configs is not None: pulumi.set(__self__, "log_configs", log_configs) if not_ins is not None: pulumi.set(__self__, "not_ins", not_ins) if permissions is not None: pulumi.set(__self__, "permissions", permissions) @property @pulumi.getter def action(self) -> Optional[pulumi.Input['RuleAction']]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input['RuleAction']]): pulumi.set(self, "action", value) @property @pulumi.getter def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "conditions") @conditions.setter def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]]): pulumi.set(self, "conditions", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def ins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "ins") @ins.setter def ins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ins", value) @property @pulumi.getter(name="logConfigs") def log_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "log_configs") @log_configs.setter def log_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]]]): pulumi.set(self, "log_configs", value) @property @pulumi.getter(name="notIns") def not_ins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "not_ins") @not_ins.setter def not_ins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "not_ins", value) @property @pulumi.getter def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "permissions") @permissions.setter def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "permissions", value) @pulumi.input_type class SSLHealthCheckArgs: def __init__(__self__, *, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['SSLHealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['SSLHealthCheckProxyHeader']] = None, request: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['SSLHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields. 
:param pulumi.Input['SSLHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request: The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII. :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. """ if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if port_specification is not None: pulumi.set(__self__, "port_specification", port_specification) if proxy_header is not None: pulumi.set(__self__, "proxy_header", proxy_header) if request is not None: pulumi.set(__self__, "request", request) if response is not None: pulumi.set(__self__, "response", response) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter(name="portSpecification") def port_specification(self) -> Optional[pulumi.Input['SSLHealthCheckPortSpecification']]: """ Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields. """ return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['SSLHealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @property @pulumi.getter(name="proxyHeader") def proxy_header(self) -> Optional[pulumi.Input['SSLHealthCheckProxyHeader']]: """ Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. """ return pulumi.get(self, "proxy_header") @proxy_header.setter def proxy_header(self, value: Optional[pulumi.Input['SSLHealthCheckProxyHeader']]): pulumi.set(self, "proxy_header", value) @property @pulumi.getter def request(self) -> Optional[pulumi.Input[str]]: """ The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII. 
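
        Example (illustrative sketch; the request/response strings are arbitrary
        ASCII placeholders):

            check = SSLHealthCheckArgs(
                port=443,
                request="PING",
                response="PONG",
            )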
""" return pulumi.get(self, "request") @request.setter def request(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @pulumi.input_type class SavedDiskArgs: def __init__(__self__, *, source_disk: Optional[pulumi.Input[str]] = None): """ An instance-attached disk resource. :param pulumi.Input[str] source_disk: Specifies a URL of the disk attached to the source instance. """ if source_disk is not None: pulumi.set(__self__, "source_disk", source_disk) @property @pulumi.getter(name="sourceDisk") def source_disk(self) -> Optional[pulumi.Input[str]]: """ Specifies a URL of the disk attached to the source instance. """ return pulumi.get(self, "source_disk") @source_disk.setter def source_disk(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_disk", value) @pulumi.input_type class SchedulingNodeAffinityArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, operator: Optional[pulumi.Input['SchedulingNodeAffinityOperator']] = None, values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Node Affinity: the configuration of desired nodes onto which this Instance could be scheduled. :param pulumi.Input[str] key: Corresponds to the label key of Node resource. :param pulumi.Input['SchedulingNodeAffinityOperator'] operator: Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of Node resource. """ if key is not None: pulumi.set(__self__, "key", key) if operator is not None: pulumi.set(__self__, "operator", operator) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ Corresponds to the label key of Node resource. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def operator(self) -> Optional[pulumi.Input['SchedulingNodeAffinityOperator']]: """ Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. """ return pulumi.get(self, "operator") @operator.setter def operator(self, value: Optional[pulumi.Input['SchedulingNodeAffinityOperator']]): pulumi.set(self, "operator", value) @property @pulumi.getter def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Corresponds to the label values of Node resource. 
""" return pulumi.get(self, "values") @values.setter def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "values", value) @pulumi.input_type class SchedulingArgs: def __init__(__self__, *, automatic_restart: Optional[pulumi.Input[bool]] = None, availability_domain: Optional[pulumi.Input[int]] = None, current_cpus: Optional[pulumi.Input[int]] = None, current_memory_mb: Optional[pulumi.Input[str]] = None, host_error_timeout_seconds: Optional[pulumi.Input[int]] = None, instance_termination_action: Optional[pulumi.Input['SchedulingInstanceTerminationAction']] = None, latency_tolerant: Optional[pulumi.Input[bool]] = None, location_hint: Optional[pulumi.Input[str]] = None, maintenance_freeze_duration_hours: Optional[pulumi.Input[int]] = None, maintenance_interval: Optional[pulumi.Input['SchedulingMaintenanceInterval']] = None, max_run_duration: Optional[pulumi.Input['DurationArgs']] = None, min_node_cpus: Optional[pulumi.Input[int]] = None, node_affinities: Optional[pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]]] = None, on_host_maintenance: Optional[pulumi.Input['SchedulingOnHostMaintenance']] = None, preemptible: Optional[pulumi.Input[bool]] = None, provisioning_model: Optional[pulumi.Input['SchedulingProvisioningModel']] = None, termination_time: Optional[pulumi.Input[str]] = None): """ Sets the scheduling options for an Instance. NextID: 21 :param pulumi.Input[bool] automatic_restart: Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. :param pulumi.Input[int] availability_domain: Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. :param pulumi.Input[int] current_cpus: Current number of vCPUs available for VM. 0 or unset means default vCPUs of the current machine type. :param pulumi.Input[str] current_memory_mb: Current amount of memory (in MB) available for VM. 0 or unset means default amount of memory of the current machine type. :param pulumi.Input[int] host_error_timeout_seconds: Specify the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. :param pulumi.Input['SchedulingInstanceTerminationAction'] instance_termination_action: Specifies the termination action for the instance. :param pulumi.Input[bool] latency_tolerant: Defines whether the instance is tolerant of higher cpu latency. This can only be set during instance creation, or when the instance is not currently running. It must not be set if the preemptible option is also set. :param pulumi.Input[str] location_hint: An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. :param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after VM instance creation where the VM won't be scheduled for maintenance. 
        :param pulumi.Input['SchedulingMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals.
        :param pulumi.Input['DurationArgs'] max_run_duration: Specifies the max run duration for the given instance. If specified, the instance termination action will be performed at the end of the run duration.
        :param pulumi.Input[int] min_node_cpus: The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node.
        :param pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]] node_affinities: A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity.
        :param pulumi.Input['SchedulingOnHostMaintenance'] on_host_maintenance: Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.
        :param pulumi.Input[bool] preemptible: Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states.
        :param pulumi.Input['SchedulingProvisioningModel'] provisioning_model: Specifies the provisioning model of the instance.
        :param pulumi.Input[str] termination_time: Specifies the timestamp when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time.
        """
        if automatic_restart is not None:
            pulumi.set(__self__, "automatic_restart", automatic_restart)
        if availability_domain is not None:
            pulumi.set(__self__, "availability_domain", availability_domain)
        if current_cpus is not None:
            pulumi.set(__self__, "current_cpus", current_cpus)
        if current_memory_mb is not None:
            pulumi.set(__self__, "current_memory_mb", current_memory_mb)
        if host_error_timeout_seconds is not None:
            pulumi.set(__self__, "host_error_timeout_seconds", host_error_timeout_seconds)
        if instance_termination_action is not None:
            pulumi.set(__self__, "instance_termination_action", instance_termination_action)
        if latency_tolerant is not None:
            pulumi.set(__self__, "latency_tolerant", latency_tolerant)
        if location_hint is not None:
            pulumi.set(__self__, "location_hint", location_hint)
        if maintenance_freeze_duration_hours is not None:
            pulumi.set(__self__, "maintenance_freeze_duration_hours", maintenance_freeze_duration_hours)
        if maintenance_interval is not None:
            pulumi.set(__self__, "maintenance_interval", maintenance_interval)
        if max_run_duration is not None:
            pulumi.set(__self__, "max_run_duration", max_run_duration)
        if min_node_cpus is not None:
            pulumi.set(__self__, "min_node_cpus", min_node_cpus)
        if node_affinities is not None:
            pulumi.set(__self__, "node_affinities", node_affinities)
        if on_host_maintenance is not None:
            pulumi.set(__self__, "on_host_maintenance", on_host_maintenance)
        if preemptible is not None:
            pulumi.set(__self__, "preemptible", preemptible)
        if provisioning_model is not None:
            pulumi.set(__self__, "provisioning_model", provisioning_model)
        if termination_time is not None:
            pulumi.set(__self__, "termination_time", termination_time)

    @property
    @pulumi.getter(name="automaticRestart")
    def automatic_restart(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the instance should be automatically restarted if it is
        terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine.
        """
        return pulumi.get(self, "automatic_restart")

    @automatic_restart.setter
    def automatic_restart(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automatic_restart", value)

    @property
    @pulumi.getter(name="availabilityDomain")
    def availability_domain(self) -> Optional[pulumi.Input[int]]:
        """
        Specifies the availability domain (AD) on which this instance should be scheduled. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1 and the number of availability domains in your GroupPlacementPolicy.
        """
        return pulumi.get(self, "availability_domain")

    @availability_domain.setter
    def availability_domain(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "availability_domain", value)

    @property
    @pulumi.getter(name="currentCpus")
    def current_cpus(self) -> Optional[pulumi.Input[int]]:
        """
        Current number of vCPUs available for the VM. 0 or unset means the default number of vCPUs for the current machine type.
        """
        return pulumi.get(self, "current_cpus")

    @current_cpus.setter
    def current_cpus(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "current_cpus", value)

    @property
    @pulumi.getter(name="currentMemoryMb")
    def current_memory_mb(self) -> Optional[pulumi.Input[str]]:
        """
        Current amount of memory (in MB) available for the VM. 0 or unset means the default amount of memory for the current machine type.
        """
        return pulumi.get(self, "current_memory_mb")

    @current_memory_mb.setter
    def current_memory_mb(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "current_memory_mb", value)

    @property
    @pulumi.getter(name="hostErrorTimeoutSeconds")
    def host_error_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        Specify the time in seconds for host error detection. The value must be within the range [90, 330] in increments of 30. If unset, the default host error recovery behavior is used.
        """
        return pulumi.get(self, "host_error_timeout_seconds")

    @host_error_timeout_seconds.setter
    def host_error_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "host_error_timeout_seconds", value)

    @property
    @pulumi.getter(name="instanceTerminationAction")
    def instance_termination_action(self) -> Optional[pulumi.Input['SchedulingInstanceTerminationAction']]:
        """
        Specifies the termination action for the instance.
        """
        return pulumi.get(self, "instance_termination_action")

    @instance_termination_action.setter
    def instance_termination_action(self, value: Optional[pulumi.Input['SchedulingInstanceTerminationAction']]):
        pulumi.set(self, "instance_termination_action", value)

    @property
    @pulumi.getter(name="latencyTolerant")
    def latency_tolerant(self) -> Optional[pulumi.Input[bool]]:
        """
        Defines whether the instance is tolerant of higher CPU latency. This can only be set during instance creation, or when the instance is not currently running. It must not be set if the preemptible option is also set.
""" return pulumi.get(self, "latency_tolerant") @latency_tolerant.setter def latency_tolerant(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "latency_tolerant", value) @property @pulumi.getter(name="locationHint") def location_hint(self) -> Optional[pulumi.Input[str]]: """ An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. """ return pulumi.get(self, "location_hint") @location_hint.setter def location_hint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location_hint", value) @property @pulumi.getter(name="maintenanceFreezeDurationHours") def maintenance_freeze_duration_hours(self) -> Optional[pulumi.Input[int]]: """ Specifies the number of hours after VM instance creation where the VM won't be scheduled for maintenance. """ return pulumi.get(self, "maintenance_freeze_duration_hours") @maintenance_freeze_duration_hours.setter def maintenance_freeze_duration_hours(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "maintenance_freeze_duration_hours", value) @property @pulumi.getter(name="maintenanceInterval") def maintenance_interval(self) -> Optional[pulumi.Input['SchedulingMaintenanceInterval']]: """ For more information about maintenance intervals, see Setting maintenance intervals. """ return pulumi.get(self, "maintenance_interval") @maintenance_interval.setter def maintenance_interval(self, value: Optional[pulumi.Input['SchedulingMaintenanceInterval']]): pulumi.set(self, "maintenance_interval", value) @property @pulumi.getter(name="maxRunDuration") def max_run_duration(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the max run duration for the given instance. If specified, the instance termination action will be performed at the end of the run duration. """ return pulumi.get(self, "max_run_duration") @max_run_duration.setter def max_run_duration(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "max_run_duration", value) @property @pulumi.getter(name="minNodeCpus") def min_node_cpus(self) -> Optional[pulumi.Input[int]]: """ The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. """ return pulumi.get(self, "min_node_cpus") @min_node_cpus.setter def min_node_cpus(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_node_cpus", value) @property @pulumi.getter(name="nodeAffinities") def node_affinities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]]]: """ A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. """ return pulumi.get(self, "node_affinities") @node_affinities.setter def node_affinities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]]]): pulumi.set(self, "node_affinities", value) @property @pulumi.getter(name="onHostMaintenance") def on_host_maintenance(self) -> Optional[pulumi.Input['SchedulingOnHostMaintenance']]: """ Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options. 
""" return pulumi.get(self, "on_host_maintenance") @on_host_maintenance.setter def on_host_maintenance(self, value: Optional[pulumi.Input['SchedulingOnHostMaintenance']]): pulumi.set(self, "on_host_maintenance", value) @property @pulumi.getter def preemptible(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. """ return pulumi.get(self, "preemptible") @preemptible.setter def preemptible(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "preemptible", value) @property @pulumi.getter(name="provisioningModel") def provisioning_model(self) -> Optional[pulumi.Input['SchedulingProvisioningModel']]: """ Specifies the provisioning model of the instance. """ return pulumi.get(self, "provisioning_model") @provisioning_model.setter def provisioning_model(self, value: Optional[pulumi.Input['SchedulingProvisioningModel']]): pulumi.set(self, "provisioning_model", value) @property @pulumi.getter(name="terminationTime") def termination_time(self) -> Optional[pulumi.Input[str]]: """ Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time. """ return pulumi.get(self, "termination_time") @termination_time.setter def termination_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "termination_time", value) @pulumi.input_type class SdsConfigArgs: def __init__(__self__, *, grpc_service_config: Optional[pulumi.Input['GrpcServiceConfigArgs']] = None): """ [Deprecated] The configuration to access the SDS server. The configuration to access the SDS server. :param pulumi.Input['GrpcServiceConfigArgs'] grpc_service_config: The configuration to access the SDS server over GRPC. """ if grpc_service_config is not None: pulumi.set(__self__, "grpc_service_config", grpc_service_config) @property @pulumi.getter(name="grpcServiceConfig") def grpc_service_config(self) -> Optional[pulumi.Input['GrpcServiceConfigArgs']]: """ The configuration to access the SDS server over GRPC. """ return pulumi.get(self, "grpc_service_config") @grpc_service_config.setter def grpc_service_config(self, value: Optional[pulumi.Input['GrpcServiceConfigArgs']]): pulumi.set(self, "grpc_service_config", value) @pulumi.input_type class SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, rule_visibility: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility']] = None): """ Configuration options for L7 DDoS detection. :param pulumi.Input[bool] enable: If set to true, enables CAAP for L7 DDoS detection. :param pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility'] rule_visibility: Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. """ if enable is not None: pulumi.set(__self__, "enable", enable) if rule_visibility is not None: pulumi.set(__self__, "rule_visibility", rule_visibility) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ If set to true, enables CAAP for L7 DDoS detection. 
""" return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="ruleVisibility") def rule_visibility(self) -> Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility']]: """ Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. """ return pulumi.get(self, "rule_visibility") @rule_visibility.setter def rule_visibility(self, value: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility']]): pulumi.set(self, "rule_visibility", value) @pulumi.input_type class SecurityPolicyAdaptiveProtectionConfigArgs: def __init__(__self__, *, layer7_ddos_defense_config: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs']] = None): """ Configuration options for Cloud Armor Adaptive Protection (CAAP). :param pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs'] layer7_ddos_defense_config: If set to true, enables Cloud Armor Machine Learning. """ if layer7_ddos_defense_config is not None: pulumi.set(__self__, "layer7_ddos_defense_config", layer7_ddos_defense_config) @property @pulumi.getter(name="layer7DdosDefenseConfig") def layer7_ddos_defense_config(self) -> Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs']]: """ If set to true, enables Cloud Armor Machine Learning. """ return pulumi.get(self, "layer7_ddos_defense_config") @layer7_ddos_defense_config.setter def layer7_ddos_defense_config(self, value: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs']]): pulumi.set(self, "layer7_ddos_defense_config", value) @pulumi.input_type class SecurityPolicyAdvancedOptionsConfigArgs: def __init__(__self__, *, json_parsing: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigJsonParsing']] = None, log_level: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigLogLevel']] = None): if json_parsing is not None: pulumi.set(__self__, "json_parsing", json_parsing) if log_level is not None: pulumi.set(__self__, "log_level", log_level) @property @pulumi.getter(name="jsonParsing") def json_parsing(self) -> Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigJsonParsing']]: return pulumi.get(self, "json_parsing") @json_parsing.setter def json_parsing(self, value: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigJsonParsing']]): pulumi.set(self, "json_parsing", value) @property @pulumi.getter(name="logLevel") def log_level(self) -> Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigLogLevel']]: return pulumi.get(self, "log_level") @log_level.setter def log_level(self, value: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigLogLevel']]): pulumi.set(self, "log_level", value) @pulumi.input_type class SecurityPolicyAssociationArgs: def __init__(__self__, *, attachment_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] attachment_id: The resource that the security policy is attached to. :param pulumi.Input[str] name: The name for an association. """ if attachment_id is not None: pulumi.set(__self__, "attachment_id", attachment_id) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="attachmentId") def attachment_id(self) -> Optional[pulumi.Input[str]]: """ The resource that the security policy is attached to. 
""" return pulumi.get(self, "attachment_id") @attachment_id.setter def attachment_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "attachment_id", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for an association. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class SecurityPolicyCloudArmorConfigArgs: def __init__(__self__, *, enable_ml: Optional[pulumi.Input[bool]] = None): """ Configuration options for Cloud Armor. :param pulumi.Input[bool] enable_ml: If set to true, enables Cloud Armor Machine Learning. """ if enable_ml is not None: pulumi.set(__self__, "enable_ml", enable_ml) @property @pulumi.getter(name="enableMl") def enable_ml(self) -> Optional[pulumi.Input[bool]]: """ If set to true, enables Cloud Armor Machine Learning. """ return pulumi.get(self, "enable_ml") @enable_ml.setter def enable_ml(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_ml", value) @pulumi.input_type class SecurityPolicyDdosProtectionConfigArgs: def __init__(__self__, *, ddos_protection: Optional[pulumi.Input['SecurityPolicyDdosProtectionConfigDdosProtection']] = None): if ddos_protection is not None: pulumi.set(__self__, "ddos_protection", ddos_protection) @property @pulumi.getter(name="ddosProtection") def ddos_protection(self) -> Optional[pulumi.Input['SecurityPolicyDdosProtectionConfigDdosProtection']]: return pulumi.get(self, "ddos_protection") @ddos_protection.setter def ddos_protection(self, value: Optional[pulumi.Input['SecurityPolicyDdosProtectionConfigDdosProtection']]): pulumi.set(self, "ddos_protection", value) @pulumi.input_type class SecurityPolicyRecaptchaOptionsConfigArgs: def __init__(__self__, *, redirect_site_key: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] redirect_site_key: An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. """ if redirect_site_key is not None: pulumi.set(__self__, "redirect_site_key", redirect_site_key) @property @pulumi.getter(name="redirectSiteKey") def redirect_site_key(self) -> Optional[pulumi.Input[str]]: """ An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. """ return pulumi.get(self, "redirect_site_key") @redirect_site_key.setter def redirect_site_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "redirect_site_key", value) @pulumi.input_type class SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs: def __init__(__self__, *, header_name: Optional[pulumi.Input[str]] = None, header_value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] header_name: The name of the header to set. :param pulumi.Input[str] header_value: The value to set the named header to. 
""" if header_name is not None: pulumi.set(__self__, "header_name", header_name) if header_value is not None: pulumi.set(__self__, "header_value", header_value) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The name of the header to set. """ return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @property @pulumi.getter(name="headerValue") def header_value(self) -> Optional[pulumi.Input[str]]: """ The value to set the named header to. """ return pulumi.get(self, "header_value") @header_value.setter def header_value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_value", value) @pulumi.input_type class SecurityPolicyRuleHttpHeaderActionArgs: def __init__(__self__, *, request_headers_to_adds: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]] request_headers_to_adds: The list of request headers to add or overwrite if they're already present. """ if request_headers_to_adds is not None: pulumi.set(__self__, "request_headers_to_adds", request_headers_to_adds) @property @pulumi.getter(name="requestHeadersToAdds") def request_headers_to_adds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]]]: """ The list of request headers to add or overwrite if they're already present. """ return pulumi.get(self, "request_headers_to_adds") @request_headers_to_adds.setter def request_headers_to_adds(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]]]): pulumi.set(self, "request_headers_to_adds", value) @pulumi.input_type class SecurityPolicyRuleMatcherConfigDestinationPortArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. 
""" return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class SecurityPolicyRuleMatcherConfigLayer4ConfigArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. 
""" return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class SecurityPolicyRuleMatcherConfigArgs: def __init__(__self__, *, dest_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_ports: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]]] = None, layer4_configs: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]]] = None, src_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_ip_ranges: CIDR IP address range. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]] dest_ports: Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]] layer4_configs: Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_ip_ranges: CIDR IP address range. Maximum number of src_ip_ranges allowed is 10. """ if dest_ip_ranges is not None: pulumi.set(__self__, "dest_ip_ranges", dest_ip_ranges) if dest_ports is not None: pulumi.set(__self__, "dest_ports", dest_ports) if layer4_configs is not None: pulumi.set(__self__, "layer4_configs", layer4_configs) if src_ip_ranges is not None: pulumi.set(__self__, "src_ip_ranges", src_ip_ranges) @property @pulumi.getter(name="destIpRanges") def dest_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "dest_ip_ranges") @dest_ip_ranges.setter def dest_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_ip_ranges", value) @property @pulumi.getter(name="destPorts") def dest_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]]]: """ Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "dest_ports") @dest_ports.setter def dest_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]]]): pulumi.set(self, "dest_ports", value) @property @pulumi.getter(name="layer4Configs") def layer4_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]]]: """ Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "layer4_configs") @layer4_configs.setter def layer4_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]]]): pulumi.set(self, "layer4_configs", value) @property @pulumi.getter(name="srcIpRanges") def src_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. Maximum number of src_ip_ranges allowed is 10. 
""" return pulumi.get(self, "src_ip_ranges") @src_ip_ranges.setter def src_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_ip_ranges", value) @pulumi.input_type class SecurityPolicyRuleMatcherArgs: def __init__(__self__, *, config: Optional[pulumi.Input['SecurityPolicyRuleMatcherConfigArgs']] = None, expr: Optional[pulumi.Input['ExprArgs']] = None, versioned_expr: Optional[pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr']] = None): """ Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified. :param pulumi.Input['SecurityPolicyRuleMatcherConfigArgs'] config: The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified. :param pulumi.Input['ExprArgs'] expr: User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. :param pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr'] versioned_expr: Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config. """ if config is not None: pulumi.set(__self__, "config", config) if expr is not None: pulumi.set(__self__, "expr", expr) if versioned_expr is not None: pulumi.set(__self__, "versioned_expr", versioned_expr) @property @pulumi.getter def config(self) -> Optional[pulumi.Input['SecurityPolicyRuleMatcherConfigArgs']]: """ The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified. """ return pulumi.get(self, "config") @config.setter def config(self, value: Optional[pulumi.Input['SecurityPolicyRuleMatcherConfigArgs']]): pulumi.set(self, "config", value) @property @pulumi.getter def expr(self) -> Optional[pulumi.Input['ExprArgs']]: """ User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. """ return pulumi.get(self, "expr") @expr.setter def expr(self, value: Optional[pulumi.Input['ExprArgs']]): pulumi.set(self, "expr", value) @property @pulumi.getter(name="versionedExpr") def versioned_expr(self) -> Optional[pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr']]: """ Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config. """ return pulumi.get(self, "versioned_expr") @versioned_expr.setter def versioned_expr(self, value: Optional[pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr']]): pulumi.set(self, "versioned_expr", value) @pulumi.input_type class SecurityPolicyRuleRateLimitOptionsThresholdArgs: def __init__(__self__, *, count: Optional[pulumi.Input[int]] = None, interval_sec: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] count: Number of HTTP(S) requests for calculating the threshold. :param pulumi.Input[int] interval_sec: Interval over which the threshold is computed. 
""" if count is not None: pulumi.set(__self__, "count", count) if interval_sec is not None: pulumi.set(__self__, "interval_sec", interval_sec) @property @pulumi.getter def count(self) -> Optional[pulumi.Input[int]]: """ Number of HTTP(S) requests for calculating the threshold. """ return pulumi.get(self, "count") @count.setter def count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "count", value) @property @pulumi.getter(name="intervalSec") def interval_sec(self) -> Optional[pulumi.Input[int]]: """ Interval over which the threshold is computed. """ return pulumi.get(self, "interval_sec") @interval_sec.setter def interval_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "interval_sec", value) @pulumi.input_type class SecurityPolicyRuleRateLimitOptionsArgs: def __init__(__self__, *, ban_duration_sec: Optional[pulumi.Input[int]] = None, ban_threshold: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']] = None, conform_action: Optional[pulumi.Input[str]] = None, enforce_on_key: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey']] = None, enforce_on_key_name: Optional[pulumi.Input[str]] = None, exceed_action: Optional[pulumi.Input[str]] = None, exceed_redirect_options: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']] = None, rate_limit_threshold: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']] = None): """ :param pulumi.Input[int] ban_duration_sec: Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs'] ban_threshold: Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'ban_duration_sec' when the number of requests that exceed the 'rate_limit_threshold' also exceed this 'ban_threshold'. :param pulumi.Input[str] conform_action: Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey'] enforce_on_key: Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key type defaults to ALL. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. :param pulumi.Input[str] enforce_on_key_name: Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. 
        HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.
        :param pulumi.Input[str] exceed_action: Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are "deny(STATUS)", where valid values for STATUS are 403, 404, 429, and 502, and "redirect", where the redirect parameters come from exceed_redirect_options below.
        :param pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs'] exceed_redirect_options: Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect.
        :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs'] rate_limit_threshold: Threshold at which to begin rate limiting.
        """
        if ban_duration_sec is not None:
            pulumi.set(__self__, "ban_duration_sec", ban_duration_sec)
        if ban_threshold is not None:
            pulumi.set(__self__, "ban_threshold", ban_threshold)
        if conform_action is not None:
            pulumi.set(__self__, "conform_action", conform_action)
        if enforce_on_key is not None:
            pulumi.set(__self__, "enforce_on_key", enforce_on_key)
        if enforce_on_key_name is not None:
            pulumi.set(__self__, "enforce_on_key_name", enforce_on_key_name)
        if exceed_action is not None:
            pulumi.set(__self__, "exceed_action", exceed_action)
        if exceed_redirect_options is not None:
            pulumi.set(__self__, "exceed_redirect_options", exceed_redirect_options)
        if rate_limit_threshold is not None:
            pulumi.set(__self__, "rate_limit_threshold", rate_limit_threshold)

    @property
    @pulumi.getter(name="banDurationSec")
    def ban_duration_sec(self) -> Optional[pulumi.Input[int]]:
        """
        Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold.
        """
        return pulumi.get(self, "ban_duration_sec")

    @ban_duration_sec.setter
    def ban_duration_sec(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ban_duration_sec", value)

    @property
    @pulumi.getter(name="banThreshold")
    def ban_threshold(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]:
        """
        Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'ban_duration_sec' when the number of requests that exceed the 'rate_limit_threshold' also exceed this 'ban_threshold'.
        """
        return pulumi.get(self, "ban_threshold")

    @ban_threshold.setter
    def ban_threshold(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]):
        pulumi.set(self, "ban_threshold", value)

    @property
    @pulumi.getter(name="conformAction")
    def conform_action(self) -> Optional[pulumi.Input[str]]:
        """
        Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only.
        """
        return pulumi.get(self, "conform_action")

    @conform_action.setter
    def conform_action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "conform_action", value)

    @property
    @pulumi.getter(name="enforceOnKey")
    def enforce_on_key(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey']]:
        """
        Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key.
        Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key type defaults to ALL. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL.
        """
        return pulumi.get(self, "enforce_on_key")

    @enforce_on_key.setter
    def enforce_on_key(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey']]):
        pulumi.set(self, "enforce_on_key", value)

    @property
    @pulumi.getter(name="enforceOnKeyName")
    def enforce_on_key_name(self) -> Optional[pulumi.Input[str]]:
        """
        Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.
        """
        return pulumi.get(self, "enforce_on_key_name")

    @enforce_on_key_name.setter
    def enforce_on_key_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "enforce_on_key_name", value)

    @property
    @pulumi.getter(name="exceedAction")
    def exceed_action(self) -> Optional[pulumi.Input[str]]:
        """
        Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are "deny(STATUS)", where valid values for STATUS are 403, 404, 429, and 502, and "redirect", where the redirect parameters come from exceed_redirect_options below.
        """
        return pulumi.get(self, "exceed_action")

    @exceed_action.setter
    def exceed_action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "exceed_action", value)

    @property
    @pulumi.getter(name="exceedRedirectOptions")
    def exceed_redirect_options(self) -> Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]:
        """
        Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect.
        """
        return pulumi.get(self, "exceed_redirect_options")

    @exceed_redirect_options.setter
    def exceed_redirect_options(self, value: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]):
        pulumi.set(self, "exceed_redirect_options", value)

    @property
    @pulumi.getter(name="rateLimitThreshold")
    def rate_limit_threshold(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]:
        """
        Threshold at which to begin rate limiting.
        """
        return pulumi.get(self, "rate_limit_threshold")

    @rate_limit_threshold.setter
    def rate_limit_threshold(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]):
        pulumi.set(self, "rate_limit_threshold", value)


@pulumi.input_type
class SecurityPolicyRuleRedirectOptionsArgs:
    def __init__(__self__, *,
                 target: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsType']] = None):
        """
        :param pulumi.Input[str] target: Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA.
:param pulumi.Input['SecurityPolicyRuleRedirectOptionsType'] type: Type of the redirect action. """ if target is not None: pulumi.set(__self__, "target", target) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def target(self) -> Optional[pulumi.Input[str]]: """ Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. """ return pulumi.get(self, "target") @target.setter def target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "target", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsType']]: """ Type of the redirect action. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsType']]): pulumi.set(self, "type", value) @pulumi.input_type class SecurityPolicyRuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, direction: Optional[pulumi.Input['SecurityPolicyRuleDirection']] = None, enable_logging: Optional[pulumi.Input[bool]] = None, header_action: Optional[pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs']] = None, match: Optional[pulumi.Input['SecurityPolicyRuleMatcherArgs']] = None, preview: Optional[pulumi.Input[bool]] = None, priority: Optional[pulumi.Input[int]] = None, rate_limit_options: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs']] = None, redirect_options: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']] = None, redirect_target: Optional[pulumi.Input[str]] = None, rule_number: Optional[pulumi.Input[str]] = None, target_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, target_service_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). :param pulumi.Input[str] action: The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502). - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input['SecurityPolicyRuleDirection'] direction: The direction in which this rule applies. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[bool] enable_logging: Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. 
        This field may only be specified when the versioned_expr is set to FIREWALL.
        :param pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs'] header_action: Optional. Additional actions that are performed on headers.
        :param pulumi.Input['SecurityPolicyRuleMatcherArgs'] match: A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.
        :param pulumi.Input[bool] preview: If set to true, the specified action is not enforced.
        :param pulumi.Input[int] priority: An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority.
        :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs'] rate_limit_options: Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions.
        :param pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs'] redirect_options: Parameters defining the redirect action. Cannot be specified for any other actions.
        :param pulumi.Input[str] redirect_target: This must be specified for redirect actions. Cannot be specified for any other actions.
        :param pulumi.Input[str] rule_number: Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation; if a rule number is not specified, it will be generated by the server.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] target_resources: A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. This field may only be specified when versioned_expr is set to FIREWALL.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] target_service_accounts: A list of service accounts indicating the sets of instances to which this rule applies.
        """
        if action is not None:
            pulumi.set(__self__, "action", action)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if direction is not None:
            pulumi.set(__self__, "direction", direction)
        if enable_logging is not None:
            pulumi.set(__self__, "enable_logging", enable_logging)
        if header_action is not None:
            pulumi.set(__self__, "header_action", header_action)
        if match is not None:
            pulumi.set(__self__, "match", match)
        if preview is not None:
            pulumi.set(__self__, "preview", preview)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if rate_limit_options is not None:
            pulumi.set(__self__, "rate_limit_options", rate_limit_options)
        if redirect_options is not None:
            pulumi.set(__self__, "redirect_options", redirect_options)
        if redirect_target is not None:
            pulumi.set(__self__, "redirect_target", redirect_target)
        if rule_number is not None:
            pulumi.set(__self__, "rule_number", rule_number)
        if target_resources is not None:
            pulumi.set(__self__, "target_resources", target_resources)
        if target_service_accounts is not None:
            pulumi.set(__self__, "target_service_accounts", target_service_accounts)

    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502).
        - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this.
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional description of this resource. Provide this property when you create the resource.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def direction(self) -> Optional[pulumi.Input['SecurityPolicyRuleDirection']]:
        """
        The direction in which this rule applies. This field may only be specified when versioned_expr is set to FIREWALL.
        """
        return pulumi.get(self, "direction")

    @direction.setter
    def direction(self, value: Optional[pulumi.Input['SecurityPolicyRuleDirection']]):
        pulumi.set(self, "direction", value)

    @property
    @pulumi.getter(name="enableLogging")
    def enable_logging(self) -> Optional[pulumi.Input[bool]]:
        """
        Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. This field may only be specified when the versioned_expr is set to FIREWALL.
        """
        return pulumi.get(self, "enable_logging")

    @enable_logging.setter
    def enable_logging(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_logging", value)

    @property
    @pulumi.getter(name="headerAction")
    def header_action(self) -> Optional[pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs']]:
        """
        Optional. Additional actions that are performed on headers.
        """
        return pulumi.get(self, "header_action")

    @header_action.setter
    def header_action(self, value: Optional[pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs']]):
        pulumi.set(self, "header_action", value)

    @property
    @pulumi.getter
    def match(self) -> Optional[pulumi.Input['SecurityPolicyRuleMatcherArgs']]:
        """
        A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.
        """
        return pulumi.get(self, "match")

    @match.setter
    def match(self, value: Optional[pulumi.Input['SecurityPolicyRuleMatcherArgs']]):
        pulumi.set(self, "match", value)

    @property
    @pulumi.getter
    def preview(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, the specified action is not enforced.
        """
        return pulumi.get(self, "preview")

    @preview.setter
    def preview(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "preview", value)

    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """
        An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority.
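
        Example (illustrative sketch; the priority, action, and IP range are
        hypothetical, and the SecurityPolicyRuleMatcherVersionedExpr enum is
        assumed to come from this package):

            rule = SecurityPolicyRuleArgs(
                priority=1000,
                action="deny(403)",
                match=SecurityPolicyRuleMatcherArgs(
                    versioned_expr=SecurityPolicyRuleMatcherVersionedExpr.SRC_IPS_V1,
                    config=SecurityPolicyRuleMatcherConfigArgs(
                        src_ip_ranges=["203.0.113.0/24"],
                    ),
                ),
            )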
""" return pulumi.get(self, "priority") @priority.setter def priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "priority", value) @property @pulumi.getter(name="rateLimitOptions") def rate_limit_options(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs']]: """ Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. """ return pulumi.get(self, "rate_limit_options") @rate_limit_options.setter def rate_limit_options(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs']]): pulumi.set(self, "rate_limit_options", value) @property @pulumi.getter(name="redirectOptions") def redirect_options(self) -> Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]: """ Parameters defining the redirect action. Cannot be specified for any other actions. """ return pulumi.get(self, "redirect_options") @redirect_options.setter def redirect_options(self, value: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]): pulumi.set(self, "redirect_options", value) @property @pulumi.getter(name="redirectTarget") def redirect_target(self) -> Optional[pulumi.Input[str]]: """ This must be specified for redirect actions. Cannot be specified for any other actions. """ return pulumi.get(self, "redirect_target") @redirect_target.setter def redirect_target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "redirect_target", value) @property @pulumi.getter(name="ruleNumber") def rule_number(self) -> Optional[pulumi.Input[str]]: """ Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation, if rule number is not specified it will be generated by the server. """ return pulumi.get(self, "rule_number") @rule_number.setter def rule_number(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "rule_number", value) @property @pulumi.getter(name="targetResources") def target_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "target_resources") @target_resources.setter def target_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_resources", value) @property @pulumi.getter(name="targetServiceAccounts") def target_service_accounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of service accounts indicating the sets of instances that are applied with this rule. """ return pulumi.get(self, "target_service_accounts") @target_service_accounts.setter def target_service_accounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_service_accounts", value) @pulumi.input_type class SecuritySettingsArgs: def __init__(__self__, *, client_tls_policy: Optional[pulumi.Input[str]] = None, subject_alt_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The authentication and authorization settings for a BackendService. :param pulumi.Input[str] client_tls_policy: Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. 
               clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.
               If left blank, communications are not encrypted. Note: This field currently has no impact.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alt_names: Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with a server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.
        """
        if client_tls_policy is not None:
            pulumi.set(__self__, "client_tls_policy", client_tls_policy)
        if subject_alt_names is not None:
            pulumi.set(__self__, "subject_alt_names", subject_alt_names)

    @property
    @pulumi.getter(name="clientTlsPolicy")
    def client_tls_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.
        """
        return pulumi.get(self, "client_tls_policy")

    @client_tls_policy.setter
    def client_tls_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_tls_policy", value)

    @property
    @pulumi.getter(name="subjectAltNames")
    def subject_alt_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with a server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.
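
        A minimal illustrative sketch (the policy URL and SPIFFE ID below are hypothetical):

            security = SecuritySettingsArgs(
                client_tls_policy="projects/my-project/locations/global/clientTlsPolicies/my-policy",
                subject_alt_names=["spiffe://example.org/my-service"],
            )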
""" return pulumi.get(self, "subject_alt_names") @subject_alt_names.setter def subject_alt_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "subject_alt_names", value) @pulumi.input_type class ServerBindingArgs: def __init__(__self__, *, type: Optional[pulumi.Input['ServerBindingType']] = None): if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['ServerBindingType']]: return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['ServerBindingType']]): pulumi.set(self, "type", value) @pulumi.input_type class ServerTlsSettingsArgs: def __init__(__self__, *, proxy_tls_context: Optional[pulumi.Input['TlsContextArgs']] = None, subject_alt_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tls_mode: Optional[pulumi.Input['ServerTlsSettingsTlsMode']] = None): """ The TLS settings for the server. :param pulumi.Input['TlsContextArgs'] proxy_tls_context: Configures the mechanism to obtain security certificates and identity information. :param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alt_names: A list of alternate names to verify the subject identity in the certificate presented by the client. :param pulumi.Input['ServerTlsSettingsTlsMode'] tls_mode: Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: - SIMPLE Secure connections with standard TLS semantics. - MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication. """ if proxy_tls_context is not None: pulumi.set(__self__, "proxy_tls_context", proxy_tls_context) if subject_alt_names is not None: pulumi.set(__self__, "subject_alt_names", subject_alt_names) if tls_mode is not None: pulumi.set(__self__, "tls_mode", tls_mode) @property @pulumi.getter(name="proxyTlsContext") def proxy_tls_context(self) -> Optional[pulumi.Input['TlsContextArgs']]: """ Configures the mechanism to obtain security certificates and identity information. """ return pulumi.get(self, "proxy_tls_context") @proxy_tls_context.setter def proxy_tls_context(self, value: Optional[pulumi.Input['TlsContextArgs']]): pulumi.set(self, "proxy_tls_context", value) @property @pulumi.getter(name="subjectAltNames") def subject_alt_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of alternate names to verify the subject identity in the certificate presented by the client. """ return pulumi.get(self, "subject_alt_names") @subject_alt_names.setter def subject_alt_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "subject_alt_names", value) @property @pulumi.getter(name="tlsMode") def tls_mode(self) -> Optional[pulumi.Input['ServerTlsSettingsTlsMode']]: """ Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: - SIMPLE Secure connections with standard TLS semantics. - MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication. 
""" return pulumi.get(self, "tls_mode") @tls_mode.setter def tls_mode(self, value: Optional[pulumi.Input['ServerTlsSettingsTlsMode']]): pulumi.set(self, "tls_mode", value) @pulumi.input_type class ServiceAccountArgs: def __init__(__self__, *, email: Optional[pulumi.Input[str]] = None, scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ A service account. :param pulumi.Input[str] email: Email address of the service account. :param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: The list of scopes to be made available for this service account. """ if email is not None: pulumi.set(__self__, "email", email) if scopes is not None: pulumi.set(__self__, "scopes", scopes) @property @pulumi.getter def email(self) -> Optional[pulumi.Input[str]]: """ Email address of the service account. """ return pulumi.get(self, "email") @email.setter def email(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "email", value) @property @pulumi.getter def scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of scopes to be made available for this service account. """ return pulumi.get(self, "scopes") @scopes.setter def scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "scopes", value) @pulumi.input_type class ServiceAttachmentConsumerProjectLimitArgs: def __init__(__self__, *, connection_limit: Optional[pulumi.Input[int]] = None, project_id_or_num: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] connection_limit: The value of the limit to set. :param pulumi.Input[str] project_id_or_num: The project id or number for the project to set the limit for. """ if connection_limit is not None: pulumi.set(__self__, "connection_limit", connection_limit) if project_id_or_num is not None: pulumi.set(__self__, "project_id_or_num", project_id_or_num) @property @pulumi.getter(name="connectionLimit") def connection_limit(self) -> Optional[pulumi.Input[int]]: """ The value of the limit to set. """ return pulumi.get(self, "connection_limit") @connection_limit.setter def connection_limit(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "connection_limit", value) @property @pulumi.getter(name="projectIdOrNum") def project_id_or_num(self) -> Optional[pulumi.Input[str]]: """ The project id or number for the project to set the limit for. """ return pulumi.get(self, "project_id_or_num") @project_id_or_num.setter def project_id_or_num(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project_id_or_num", value) @pulumi.input_type class ShareSettingsArgs: def __init__(__self__, *, folder_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, project_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, projects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, share_type: Optional[pulumi.Input['ShareSettingsShareType']] = None): """ The share setting for reservations and sole tenancy node groups. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] folder_map: A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. Folder id should be a string of number, and without "folders/" prefix. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] project_map: A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS. 
        :param pulumi.Input[Sequence[pulumi.Input[str]]] projects: A list of project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS.
        :param pulumi.Input['ShareSettingsShareType'] share_type: Type of sharing for this shared-reservation.
        """
        if folder_map is not None:
            pulumi.set(__self__, "folder_map", folder_map)
        if project_map is not None:
            pulumi.set(__self__, "project_map", project_map)
        if projects is not None:
            pulumi.set(__self__, "projects", projects)
        if share_type is not None:
            pulumi.set(__self__, "share_type", share_type)

    @property
    @pulumi.getter(name="folderMap")
    def folder_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. The folder id should be a string of numbers, without the "folders/" prefix.
        """
        return pulumi.get(self, "folder_map")

    @folder_map.setter
    def folder_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "folder_map", value)

    @property
    @pulumi.getter(name="projectMap")
    def project_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS.
        """
        return pulumi.get(self, "project_map")

    @project_map.setter
    def project_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "project_map", value)

    @property
    @pulumi.getter
    def projects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS.
        """
        return pulumi.get(self, "projects")

    @projects.setter
    def projects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "projects", value)

    @property
    @pulumi.getter(name="shareType")
    def share_type(self) -> Optional[pulumi.Input['ShareSettingsShareType']]:
        """
        Type of sharing for this shared-reservation.
        """
        return pulumi.get(self, "share_type")

    @share_type.setter
    def share_type(self, value: Optional[pulumi.Input['ShareSettingsShareType']]):
        pulumi.set(self, "share_type", value)


@pulumi.input_type
class ShieldedInstanceConfigArgs:
    def __init__(__self__, *,
                 enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,
                 enable_secure_boot: Optional[pulumi.Input[bool]] = None,
                 enable_vtpm: Optional[pulumi.Input[bool]] = None):
        """
        A set of Shielded Instance options.
        :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. Enabled by default.
        :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. Disabled by default.
        :param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled. Enabled by default.
        """
        if enable_integrity_monitoring is not None:
            pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring)
        if enable_secure_boot is not None:
            pulumi.set(__self__, "enable_secure_boot", enable_secure_boot)
        if enable_vtpm is not None:
            pulumi.set(__self__, "enable_vtpm", enable_vtpm)

    @property
    @pulumi.getter(name="enableIntegrityMonitoring")
    def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]:
        """
        Defines whether the instance has integrity monitoring enabled. Enabled by default.
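
        An illustrative sketch of the enclosing input type with all Shielded Instance features switched on (a hypothetical configuration, not a recommendation):

            shielded = ShieldedInstanceConfigArgs(
                enable_secure_boot=True,
                enable_vtpm=True,
                enable_integrity_monitoring=True,
            )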
""" return pulumi.get(self, "enable_integrity_monitoring") @enable_integrity_monitoring.setter def enable_integrity_monitoring(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_integrity_monitoring", value) @property @pulumi.getter(name="enableSecureBoot") def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has Secure Boot enabled. Disabled by default. """ return pulumi.get(self, "enable_secure_boot") @enable_secure_boot.setter def enable_secure_boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_secure_boot", value) @property @pulumi.getter(name="enableVtpm") def enable_vtpm(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has the vTPM enabled. Enabled by default. """ return pulumi.get(self, "enable_vtpm") @enable_vtpm.setter def enable_vtpm(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_vtpm", value) @pulumi.input_type class ShieldedInstanceIntegrityPolicyArgs: def __init__(__self__, *, update_auto_learn_policy: Optional[pulumi.Input[bool]] = None): """ The policy describes the baseline against which Instance boot integrity is measured. :param pulumi.Input[bool] update_auto_learn_policy: Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ if update_auto_learn_policy is not None: pulumi.set(__self__, "update_auto_learn_policy", update_auto_learn_policy) @property @pulumi.getter(name="updateAutoLearnPolicy") def update_auto_learn_policy(self) -> Optional[pulumi.Input[bool]]: """ Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ return pulumi.get(self, "update_auto_learn_policy") @update_auto_learn_policy.setter def update_auto_learn_policy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "update_auto_learn_policy", value) @pulumi.input_type class ShieldedVmConfigArgs: def __init__(__self__, *, enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None, enable_secure_boot: Optional[pulumi.Input[bool]] = None, enable_vtpm: Optional[pulumi.Input[bool]] = None): """ A set of Shielded VM options. :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. :param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled. """ if enable_integrity_monitoring is not None: pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring) if enable_secure_boot is not None: pulumi.set(__self__, "enable_secure_boot", enable_secure_boot) if enable_vtpm is not None: pulumi.set(__self__, "enable_vtpm", enable_vtpm) @property @pulumi.getter(name="enableIntegrityMonitoring") def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has integrity monitoring enabled. """ return pulumi.get(self, "enable_integrity_monitoring") @enable_integrity_monitoring.setter def enable_integrity_monitoring(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_integrity_monitoring", value) @property @pulumi.getter(name="enableSecureBoot") def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has Secure Boot enabled. 
""" return pulumi.get(self, "enable_secure_boot") @enable_secure_boot.setter def enable_secure_boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_secure_boot", value) @property @pulumi.getter(name="enableVtpm") def enable_vtpm(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has the vTPM enabled. """ return pulumi.get(self, "enable_vtpm") @enable_vtpm.setter def enable_vtpm(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_vtpm", value) @pulumi.input_type class ShieldedVmIntegrityPolicyArgs: def __init__(__self__, *, update_auto_learn_policy: Optional[pulumi.Input[bool]] = None): """ The policy describes the baseline against which VM instance boot integrity is measured. :param pulumi.Input[bool] update_auto_learn_policy: Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ if update_auto_learn_policy is not None: pulumi.set(__self__, "update_auto_learn_policy", update_auto_learn_policy) @property @pulumi.getter(name="updateAutoLearnPolicy") def update_auto_learn_policy(self) -> Optional[pulumi.Input[bool]]: """ Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ return pulumi.get(self, "update_auto_learn_policy") @update_auto_learn_policy.setter def update_auto_learn_policy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "update_auto_learn_policy", value) @pulumi.input_type class SourceDiskEncryptionKeyArgs: def __init__(__self__, *, disk_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None, source_disk: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['CustomerEncryptionKeyArgs'] disk_encryption_key: The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key. :param pulumi.Input[str] source_disk: URL of the disk attached to the source instance. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk """ if disk_encryption_key is not None: pulumi.set(__self__, "disk_encryption_key", disk_encryption_key) if source_disk is not None: pulumi.set(__self__, "source_disk", source_disk) @property @pulumi.getter(name="diskEncryptionKey") def disk_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key. """ return pulumi.get(self, "disk_encryption_key") @disk_encryption_key.setter def disk_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "disk_encryption_key", value) @property @pulumi.getter(name="sourceDisk") def source_disk(self) -> Optional[pulumi.Input[str]]: """ URL of the disk attached to the source instance. This can be a full or valid partial URL. 
For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk """ return pulumi.get(self, "source_disk") @source_disk.setter def source_disk(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_disk", value) @pulumi.input_type class SourceInstanceParamsArgs: def __init__(__self__, *, disk_configs: Optional[pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]]] = None): """ A specification of the parameters to use when creating the instance template from a source instance. :param pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]] disk_configs: Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes. """ if disk_configs is not None: pulumi.set(__self__, "disk_configs", disk_configs) @property @pulumi.getter(name="diskConfigs") def disk_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]]]: """ Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes. """ return pulumi.get(self, "disk_configs") @disk_configs.setter def disk_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]]]): pulumi.set(self, "disk_configs", value) @pulumi.input_type class SslCertificateManagedSslCertificateArgs: def __init__(__self__, *, domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Configuration and status of a managed SSL certificate. :param pulumi.Input[Sequence[pulumi.Input[str]]] domains: The domains for which a managed SSL certificate will be generated. Each Google-managed SSL certificate supports up to the [maximum number of domains per Google-managed SSL certificate](/load-balancing/docs/quotas#ssl_certificates). """ if domains is not None: pulumi.set(__self__, "domains", domains) @property @pulumi.getter def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The domains for which a managed SSL certificate will be generated. Each Google-managed SSL certificate supports up to the [maximum number of domains per Google-managed SSL certificate](/load-balancing/docs/quotas#ssl_certificates). """ return pulumi.get(self, "domains") @domains.setter def domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "domains", value) @pulumi.input_type class SslCertificateSelfManagedSslCertificateArgs: def __init__(__self__, *, certificate: Optional[pulumi.Input[str]] = None, private_key: Optional[pulumi.Input[str]] = None): """ Configuration and status of a self-managed SSL certificate. :param pulumi.Input[str] certificate: A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. :param pulumi.Input[str] private_key: A write-only private key in PEM format. Only insert requests will include this field. 
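
        An illustrative sketch (the file paths are hypothetical; reading local PEM files is one common way to populate these fields):

            self_managed = SslCertificateSelfManagedSslCertificateArgs(
                certificate=open("server-chain.pem").read(),
                private_key=open("server-key.pem").read(),
            )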
""" if certificate is not None: pulumi.set(__self__, "certificate", certificate) if private_key is not None: pulumi.set(__self__, "private_key", private_key) @property @pulumi.getter def certificate(self) -> Optional[pulumi.Input[str]]: """ A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. """ return pulumi.get(self, "certificate") @certificate.setter def certificate(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "certificate", value) @property @pulumi.getter(name="privateKey") def private_key(self) -> Optional[pulumi.Input[str]]: """ A write-only private key in PEM format. Only insert requests will include this field. """ return pulumi.get(self, "private_key") @private_key.setter def private_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_key", value) @pulumi.input_type class StatefulPolicyPreservedStateArgs: def __init__(__self__, *, disks: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, external_ips: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, internal_ips: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Configuration of preserved resources. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] disks: Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] external_ips: External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] internal_ips: Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. """ if disks is not None: pulumi.set(__self__, "disks", disks) if external_ips is not None: pulumi.set(__self__, "external_ips", external_ips) if internal_ips is not None: pulumi.set(__self__, "internal_ips", internal_ips) @property @pulumi.getter def disks(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks. """ return pulumi.get(self, "disks") @disks.setter def disks(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "disks", value) @property @pulumi.getter(name="externalIPs") def external_ips(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. """ return pulumi.get(self, "external_ips") @external_ips.setter def external_ips(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "external_ips", value) @property @pulumi.getter(name="internalIPs") def internal_ips(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. 
""" return pulumi.get(self, "internal_ips") @internal_ips.setter def internal_ips(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "internal_ips", value) @pulumi.input_type class StatefulPolicyArgs: def __init__(__self__, *, preserved_state: Optional[pulumi.Input['StatefulPolicyPreservedStateArgs']] = None): if preserved_state is not None: pulumi.set(__self__, "preserved_state", preserved_state) @property @pulumi.getter(name="preservedState") def preserved_state(self) -> Optional[pulumi.Input['StatefulPolicyPreservedStateArgs']]: return pulumi.get(self, "preserved_state") @preserved_state.setter def preserved_state(self, value: Optional[pulumi.Input['StatefulPolicyPreservedStateArgs']]): pulumi.set(self, "preserved_state", value) @pulumi.input_type class SubnetworkLogConfigArgs: def __init__(__self__, *, aggregation_interval: Optional[pulumi.Input['SubnetworkLogConfigAggregationInterval']] = None, enable: Optional[pulumi.Input[bool]] = None, filter_expr: Optional[pulumi.Input[str]] = None, flow_sampling: Optional[pulumi.Input[float]] = None, metadata: Optional[pulumi.Input['SubnetworkLogConfigMetadata']] = None, metadata_fields: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The available logging options for this subnetwork. :param pulumi.Input['SubnetworkLogConfigAggregationInterval'] aggregation_interval: Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection. :param pulumi.Input[bool] enable: Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. :param pulumi.Input[str] filter_expr: Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged. :param pulumi.Input[float] flow_sampling: Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 unless otherwise specified by the org policy, which means half of all collected logs are reported. :param pulumi.Input['SubnetworkLogConfigMetadata'] metadata: Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is EXCLUDE_ALL_METADATA. :param pulumi.Input[Sequence[pulumi.Input[str]]] metadata_fields: Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. 
""" if aggregation_interval is not None: pulumi.set(__self__, "aggregation_interval", aggregation_interval) if enable is not None: pulumi.set(__self__, "enable", enable) if filter_expr is not None: pulumi.set(__self__, "filter_expr", filter_expr) if flow_sampling is not None: pulumi.set(__self__, "flow_sampling", flow_sampling) if metadata is not None: pulumi.set(__self__, "metadata", metadata) if metadata_fields is not None: pulumi.set(__self__, "metadata_fields", metadata_fields) @property @pulumi.getter(name="aggregationInterval") def aggregation_interval(self) -> Optional[pulumi.Input['SubnetworkLogConfigAggregationInterval']]: """ Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection. """ return pulumi.get(self, "aggregation_interval") @aggregation_interval.setter def aggregation_interval(self, value: Optional[pulumi.Input['SubnetworkLogConfigAggregationInterval']]): pulumi.set(self, "aggregation_interval", value) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="filterExpr") def filter_expr(self) -> Optional[pulumi.Input[str]]: """ Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged. """ return pulumi.get(self, "filter_expr") @filter_expr.setter def filter_expr(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "filter_expr", value) @property @pulumi.getter(name="flowSampling") def flow_sampling(self) -> Optional[pulumi.Input[float]]: """ Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 unless otherwise specified by the org policy, which means half of all collected logs are reported. """ return pulumi.get(self, "flow_sampling") @flow_sampling.setter def flow_sampling(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "flow_sampling", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['SubnetworkLogConfigMetadata']]: """ Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is EXCLUDE_ALL_METADATA. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['SubnetworkLogConfigMetadata']]): pulumi.set(self, "metadata", value) @property @pulumi.getter(name="metadataFields") def metadata_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. 
""" return pulumi.get(self, "metadata_fields") @metadata_fields.setter def metadata_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "metadata_fields", value) @pulumi.input_type class SubnetworkSecondaryRangeArgs: def __init__(__self__, *, ip_cidr_range: Optional[pulumi.Input[str]] = None, range_name: Optional[pulumi.Input[str]] = None, reserved_internal_range: Optional[pulumi.Input[str]] = None): """ Represents a secondary IP range of a subnetwork. :param pulumi.Input[str] ip_cidr_range: The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. The range can be any range listed in the Valid ranges list. :param pulumi.Input[str] range_name: The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. :param pulumi.Input[str] reserved_internal_range: The URL of the reserved internal range. """ if ip_cidr_range is not None: pulumi.set(__self__, "ip_cidr_range", ip_cidr_range) if range_name is not None: pulumi.set(__self__, "range_name", range_name) if reserved_internal_range is not None: pulumi.set(__self__, "reserved_internal_range", reserved_internal_range) @property @pulumi.getter(name="ipCidrRange") def ip_cidr_range(self) -> Optional[pulumi.Input[str]]: """ The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. The range can be any range listed in the Valid ranges list. """ return pulumi.get(self, "ip_cidr_range") @ip_cidr_range.setter def ip_cidr_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_cidr_range", value) @property @pulumi.getter(name="rangeName") def range_name(self) -> Optional[pulumi.Input[str]]: """ The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. """ return pulumi.get(self, "range_name") @range_name.setter def range_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range_name", value) @property @pulumi.getter(name="reservedInternalRange") def reserved_internal_range(self) -> Optional[pulumi.Input[str]]: """ The URL of the reserved internal range. """ return pulumi.get(self, "reserved_internal_range") @reserved_internal_range.setter def reserved_internal_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reserved_internal_range", value) @pulumi.input_type class SubsettingArgs: def __init__(__self__, *, policy: Optional[pulumi.Input['SubsettingPolicy']] = None, subset_size: Optional[pulumi.Input[int]] = None): """ Subsetting configuration for this BackendService. Currently this is applicable only for Internal TCP/UDP load balancing, Internal HTTP(S) load balancing and Traffic Director. :param pulumi.Input[int] subset_size: The number of backends per backend group assigned to each proxy instance or each service mesh client. An input parameter to the `CONSISTENT_HASH_SUBSETTING` algorithm. Can only be set if `policy` is set to `CONSISTENT_HASH_SUBSETTING`. 
               Can only be set if load balancing scheme is `INTERNAL_MANAGED` or `INTERNAL_SELF_MANAGED`. `subset_size` is optional for Internal HTTP(S) load balancing and required for Traffic Director. If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number of proxies/clients visible to each backend and vice versa. Must be greater than 0. If `subset_size` is larger than the number of backends/endpoints, then subsetting is disabled.
        """
        if policy is not None:
            pulumi.set(__self__, "policy", policy)
        if subset_size is not None:
            pulumi.set(__self__, "subset_size", subset_size)

    @property
    @pulumi.getter
    def policy(self) -> Optional[pulumi.Input['SubsettingPolicy']]:
        return pulumi.get(self, "policy")

    @policy.setter
    def policy(self, value: Optional[pulumi.Input['SubsettingPolicy']]):
        pulumi.set(self, "policy", value)

    @property
    @pulumi.getter(name="subsetSize")
    def subset_size(self) -> Optional[pulumi.Input[int]]:
        """
        The number of backends per backend group assigned to each proxy instance or each service mesh client. An input parameter to the `CONSISTENT_HASH_SUBSETTING` algorithm. Can only be set if `policy` is set to `CONSISTENT_HASH_SUBSETTING`. Can only be set if load balancing scheme is `INTERNAL_MANAGED` or `INTERNAL_SELF_MANAGED`. `subset_size` is optional for Internal HTTP(S) load balancing and required for Traffic Director. If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number of proxies/clients visible to each backend and vice versa. Must be greater than 0. If `subset_size` is larger than the number of backends/endpoints, then subsetting is disabled.
        """
        return pulumi.get(self, "subset_size")

    @subset_size.setter
    def subset_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "subset_size", value)


@pulumi.input_type
class TCPHealthCheckArgs:
    def __init__(__self__, *,
                 port: Optional[pulumi.Input[int]] = None,
                 port_name: Optional[pulumi.Input[str]] = None,
                 port_specification: Optional[pulumi.Input['TCPHealthCheckPortSpecification']] = None,
                 proxy_header: Optional[pulumi.Input['TCPHealthCheckProxyHeader']] = None,
                 request: Optional[pulumi.Input[str]] = None,
                 response: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.
        :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
        :param pulumi.Input['TCPHealthCheckPortSpecification'] port_specification: Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields.
        :param pulumi.Input['TCPHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        :param pulumi.Input[str] request: The application data to send once the TCP connection has been established (default value is empty).
               If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.
        :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.
        """
        if port is not None:
            pulumi.set(__self__, "port", port)
        if port_name is not None:
            pulumi.set(__self__, "port_name", port_name)
        if port_specification is not None:
            pulumi.set(__self__, "port_specification", port_specification)
        if proxy_header is not None:
            pulumi.set(__self__, "proxy_header", proxy_header)
        if request is not None:
            pulumi.set(__self__, "request", request)
        if response is not None:
            pulumi.set(__self__, "response", response)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="portName")
    def port_name(self) -> Optional[pulumi.Input[str]]:
        """
        Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
        """
        return pulumi.get(self, "port_name")

    @port_name.setter
    def port_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port_name", value)

    @property
    @pulumi.getter(name="portSpecification")
    def port_specification(self) -> Optional[pulumi.Input['TCPHealthCheckPortSpecification']]:
        """
        Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields.
        """
        return pulumi.get(self, "port_specification")

    @port_specification.setter
    def port_specification(self, value: Optional[pulumi.Input['TCPHealthCheckPortSpecification']]):
        pulumi.set(self, "port_specification", value)

    @property
    @pulumi.getter(name="proxyHeader")
    def proxy_header(self) -> Optional[pulumi.Input['TCPHealthCheckProxyHeader']]:
        """
        Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        """
        return pulumi.get(self, "proxy_header")

    @proxy_header.setter
    def proxy_header(self, value: Optional[pulumi.Input['TCPHealthCheckProxyHeader']]):
        pulumi.set(self, "proxy_header", value)

    @property
    @pulumi.getter
    def request(self) -> Optional[pulumi.Input[str]]:
        """
        The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.
        """
        return pulumi.get(self, "request")

    @request.setter
    def request(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request", value)

    @property
    @pulumi.getter
    def response(self) -> Optional[pulumi.Input[str]]:
        """
        The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.
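
        An illustrative sketch of the enclosing input type (a hypothetical Redis-style probe): send "PING\r\n" after the connection is established and require the response to begin with "+PONG".

            tcp_check = TCPHealthCheckArgs(port=6379, request="PING\r\n", response="+PONG")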
""" return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @pulumi.input_type class TagsArgs: def __init__(__self__, *, items: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ A set of instance tags. :param pulumi.Input[Sequence[pulumi.Input[str]]] items: An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. """ if items is not None: pulumi.set(__self__, "items", items) @property @pulumi.getter def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. """ return pulumi.get(self, "items") @items.setter def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "items", value) @pulumi.input_type class TlsCertificateContextArgs: def __init__(__self__, *, certificate_paths: Optional[pulumi.Input['TlsCertificatePathsArgs']] = None, certificate_source: Optional[pulumi.Input['TlsCertificateContextCertificateSource']] = None, sds_config: Optional[pulumi.Input['SdsConfigArgs']] = None): """ [Deprecated] Defines the mechanism to obtain the client or server certificate. Defines the mechanism to obtain the client or server certificate. :param pulumi.Input['TlsCertificatePathsArgs'] certificate_paths: Specifies the certificate and private key paths. This field is applicable only if tlsCertificateSource is set to USE_PATH. :param pulumi.Input['TlsCertificateContextCertificateSource'] certificate_source: Defines how TLS certificates are obtained. :param pulumi.Input['SdsConfigArgs'] sds_config: Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. """ if certificate_paths is not None: pulumi.set(__self__, "certificate_paths", certificate_paths) if certificate_source is not None: pulumi.set(__self__, "certificate_source", certificate_source) if sds_config is not None: pulumi.set(__self__, "sds_config", sds_config) @property @pulumi.getter(name="certificatePaths") def certificate_paths(self) -> Optional[pulumi.Input['TlsCertificatePathsArgs']]: """ Specifies the certificate and private key paths. This field is applicable only if tlsCertificateSource is set to USE_PATH. """ return pulumi.get(self, "certificate_paths") @certificate_paths.setter def certificate_paths(self, value: Optional[pulumi.Input['TlsCertificatePathsArgs']]): pulumi.set(self, "certificate_paths", value) @property @pulumi.getter(name="certificateSource") def certificate_source(self) -> Optional[pulumi.Input['TlsCertificateContextCertificateSource']]: """ Defines how TLS certificates are obtained. """ return pulumi.get(self, "certificate_source") @certificate_source.setter def certificate_source(self, value: Optional[pulumi.Input['TlsCertificateContextCertificateSource']]): pulumi.set(self, "certificate_source", value) @property @pulumi.getter(name="sdsConfig") def sds_config(self) -> Optional[pulumi.Input['SdsConfigArgs']]: """ Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. 
""" return pulumi.get(self, "sds_config") @sds_config.setter def sds_config(self, value: Optional[pulumi.Input['SdsConfigArgs']]): pulumi.set(self, "sds_config", value) @pulumi.input_type class TlsCertificatePathsArgs: def __init__(__self__, *, certificate_path: Optional[pulumi.Input[str]] = None, private_key_path: Optional[pulumi.Input[str]] = None): """ [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. :param pulumi.Input[str] certificate_path: The path to the file holding the client or server TLS certificate to use. :param pulumi.Input[str] private_key_path: The path to the file holding the client or server private key. """ if certificate_path is not None: pulumi.set(__self__, "certificate_path", certificate_path) if private_key_path is not None: pulumi.set(__self__, "private_key_path", private_key_path) @property @pulumi.getter(name="certificatePath") def certificate_path(self) -> Optional[pulumi.Input[str]]: """ The path to the file holding the client or server TLS certificate to use. """ return pulumi.get(self, "certificate_path") @certificate_path.setter def certificate_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "certificate_path", value) @property @pulumi.getter(name="privateKeyPath") def private_key_path(self) -> Optional[pulumi.Input[str]]: """ The path to the file holding the client or server private key. """ return pulumi.get(self, "private_key_path") @private_key_path.setter def private_key_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_key_path", value) @pulumi.input_type class TlsContextArgs: def __init__(__self__, *, certificate_context: Optional[pulumi.Input['TlsCertificateContextArgs']] = None, validation_context: Optional[pulumi.Input['TlsValidationContextArgs']] = None): """ [Deprecated] The TLS settings for the client or server. The TLS settings for the client or server. :param pulumi.Input['TlsCertificateContextArgs'] certificate_context: Defines the mechanism to obtain the client or server certificate. :param pulumi.Input['TlsValidationContextArgs'] validation_context: Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. If omitted, the proxy will not validate the server or client certificate. """ if certificate_context is not None: pulumi.set(__self__, "certificate_context", certificate_context) if validation_context is not None: pulumi.set(__self__, "validation_context", validation_context) @property @pulumi.getter(name="certificateContext") def certificate_context(self) -> Optional[pulumi.Input['TlsCertificateContextArgs']]: """ Defines the mechanism to obtain the client or server certificate. """ return pulumi.get(self, "certificate_context") @certificate_context.setter def certificate_context(self, value: Optional[pulumi.Input['TlsCertificateContextArgs']]): pulumi.set(self, "certificate_context", value) @property @pulumi.getter(name="validationContext") def validation_context(self) -> Optional[pulumi.Input['TlsValidationContextArgs']]: """ Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. If omitted, the proxy will not validate the server or client certificate. 
""" return pulumi.get(self, "validation_context") @validation_context.setter def validation_context(self, value: Optional[pulumi.Input['TlsValidationContextArgs']]): pulumi.set(self, "validation_context", value) @pulumi.input_type class TlsValidationContextArgs: def __init__(__self__, *, certificate_path: Optional[pulumi.Input[str]] = None, sds_config: Optional[pulumi.Input['SdsConfigArgs']] = None, validation_source: Optional[pulumi.Input['TlsValidationContextValidationSource']] = None): """ [Deprecated] Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. validate the client/server certificate. :param pulumi.Input[str] certificate_path: The path to the file holding the CA certificate to validate the client or server certificate. :param pulumi.Input['SdsConfigArgs'] sds_config: Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. :param pulumi.Input['TlsValidationContextValidationSource'] validation_source: Defines how TLS certificates are obtained. """ if certificate_path is not None: pulumi.set(__self__, "certificate_path", certificate_path) if sds_config is not None: pulumi.set(__self__, "sds_config", sds_config) if validation_source is not None: pulumi.set(__self__, "validation_source", validation_source) @property @pulumi.getter(name="certificatePath") def certificate_path(self) -> Optional[pulumi.Input[str]]: """ The path to the file holding the CA certificate to validate the client or server certificate. """ return pulumi.get(self, "certificate_path") @certificate_path.setter def certificate_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "certificate_path", value) @property @pulumi.getter(name="sdsConfig") def sds_config(self) -> Optional[pulumi.Input['SdsConfigArgs']]: """ Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. """ return pulumi.get(self, "sds_config") @sds_config.setter def sds_config(self, value: Optional[pulumi.Input['SdsConfigArgs']]): pulumi.set(self, "sds_config", value) @property @pulumi.getter(name="validationSource") def validation_source(self) -> Optional[pulumi.Input['TlsValidationContextValidationSource']]: """ Defines how TLS certificates are obtained. """ return pulumi.get(self, "validation_source") @validation_source.setter def validation_source(self, value: Optional[pulumi.Input['TlsValidationContextValidationSource']]): pulumi.set(self, "validation_source", value) @pulumi.input_type class UDPHealthCheckArgs: def __init__(__self__, *, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, request: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] port: The UDP port number for the health check request. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input[str] request: Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII. :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII. 
""" if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if request is not None: pulumi.set(__self__, "request", request) if response is not None: pulumi.set(__self__, "response", response) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The UDP port number for the health check request. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter def request(self) -> Optional[pulumi.Input[str]]: """ Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII. """ return pulumi.get(self, "request") @request.setter def request(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @pulumi.input_type class UrlMapTestHeaderArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ HTTP headers used in UrlMapTests. :param pulumi.Input[str] name: Header name. :param pulumi.Input[str] value: Header value. """ if name is not None: pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Header name. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ Header value. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class UrlMapTestArgs: def __init__(__self__, *, backend_service_weight: Optional[pulumi.Input[int]] = None, description: Optional[pulumi.Input[str]] = None, expected_output_url: Optional[pulumi.Input[str]] = None, expected_redirect_response_code: Optional[pulumi.Input[int]] = None, headers: Optional[pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]]] = None, host: Optional[pulumi.Input[str]] = None, path: Optional[pulumi.Input[str]] = None, service: Optional[pulumi.Input[str]] = None): """ Message for the expected URL mappings. :param pulumi.Input[int] backend_service_weight: The weight to use for the supplied host and path when using advanced routing rules that involve traffic splitting. :param pulumi.Input[str] description: Description of this test case. :param pulumi.Input[str] expected_output_url: The expected output URL evaluated by the load balancer containing the scheme, host, path and query parameters. 
For rules that forward requests to backends, the test passes only when expectedOutputUrl matches the request forwarded by the load balancer to backends. For rules with urlRewrite, the test verifies that the forwarded request matches hostRewrite and pathPrefixRewrite in the urlRewrite action. When service is specified, expectedOutputUrl`s scheme is ignored. For rules with urlRedirect, the test passes only if expectedOutputUrl matches the URL in the load balancer's redirect response. If urlRedirect specifies https_redirect, the test passes only if the scheme in expectedOutputUrl is also set to HTTPS. If urlRedirect specifies strip_query, the test passes only if expectedOutputUrl does not contain any query parameters. expectedOutputUrl is optional when service is specified. :param pulumi.Input[int] expected_redirect_response_code: For rules with urlRedirect, the test passes only if expectedRedirectResponseCode matches the HTTP status code in load balancer's redirect response. expectedRedirectResponseCode cannot be set when service is set. :param pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]] headers: HTTP headers for this request. If headers contains a host header, then host must also match the header value. :param pulumi.Input[str] host: Host portion of the URL. If headers contains a host header, then host must also match the header value. :param pulumi.Input[str] path: Path portion of the URL. :param pulumi.Input[str] service: Expected BackendService or BackendBucket resource the given URL should be mapped to. The service field cannot be set if expectedRedirectResponseCode is set. """ if backend_service_weight is not None: pulumi.set(__self__, "backend_service_weight", backend_service_weight) if description is not None: pulumi.set(__self__, "description", description) if expected_output_url is not None: pulumi.set(__self__, "expected_output_url", expected_output_url) if expected_redirect_response_code is not None: pulumi.set(__self__, "expected_redirect_response_code", expected_redirect_response_code) if headers is not None: pulumi.set(__self__, "headers", headers) if host is not None: pulumi.set(__self__, "host", host) if path is not None: pulumi.set(__self__, "path", path) if service is not None: pulumi.set(__self__, "service", service) @property @pulumi.getter(name="backendServiceWeight") def backend_service_weight(self) -> Optional[pulumi.Input[int]]: """ The weight to use for the supplied host and path when using advanced routing rules that involve traffic splitting. """ return pulumi.get(self, "backend_service_weight") @backend_service_weight.setter def backend_service_weight(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "backend_service_weight", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Description of this test case. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="expectedOutputUrl") def expected_output_url(self) -> Optional[pulumi.Input[str]]: """ The expected output URL evaluated by the load balancer containing the scheme, host, path and query parameters. For rules that forward requests to backends, the test passes only when expectedOutputUrl matches the request forwarded by the load balancer to backends. For rules with urlRewrite, the test verifies that the forwarded request matches hostRewrite and pathPrefixRewrite in the urlRewrite action. 
When service is specified, expectedOutputUrl`s scheme is ignored. For rules with urlRedirect, the test passes only if expectedOutputUrl matches the URL in the load balancer's redirect response. If urlRedirect specifies https_redirect, the test passes only if the scheme in expectedOutputUrl is also set to HTTPS. If urlRedirect specifies strip_query, the test passes only if expectedOutputUrl does not contain any query parameters. expectedOutputUrl is optional when service is specified. """ return pulumi.get(self, "expected_output_url") @expected_output_url.setter def expected_output_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expected_output_url", value) @property @pulumi.getter(name="expectedRedirectResponseCode") def expected_redirect_response_code(self) -> Optional[pulumi.Input[int]]: """ For rules with urlRedirect, the test passes only if expectedRedirectResponseCode matches the HTTP status code in load balancer's redirect response. expectedRedirectResponseCode cannot be set when service is set. """ return pulumi.get(self, "expected_redirect_response_code") @expected_redirect_response_code.setter def expected_redirect_response_code(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "expected_redirect_response_code", value) @property @pulumi.getter def headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]]]: """ HTTP headers for this request. If headers contains a host header, then host must also match the header value. """ return pulumi.get(self, "headers") @headers.setter def headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]]]): pulumi.set(self, "headers", value) @property @pulumi.getter def host(self) -> Optional[pulumi.Input[str]]: """ Host portion of the URL. If headers contains a host header, then host must also match the header value. """ return pulumi.get(self, "host") @host.setter def host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host", value) @property @pulumi.getter def path(self) -> Optional[pulumi.Input[str]]: """ Path portion of the URL. """ return pulumi.get(self, "path") @path.setter def path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Expected BackendService or BackendBucket resource the given URL should be mapped to. The service field cannot be set if expectedRedirectResponseCode is set. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @pulumi.input_type class UrlRewriteArgs: def __init__(__self__, *, host_rewrite: Optional[pulumi.Input[str]] = None, path_prefix_rewrite: Optional[pulumi.Input[str]] = None): """ The spec for modifying the path before sending the request to the matched backend service. :param pulumi.Input[str] host_rewrite: Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. The value must be from 1 to 255 characters. :param pulumi.Input[str] path_prefix_rewrite: Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters. 
""" if host_rewrite is not None: pulumi.set(__self__, "host_rewrite", host_rewrite) if path_prefix_rewrite is not None: pulumi.set(__self__, "path_prefix_rewrite", path_prefix_rewrite) @property @pulumi.getter(name="hostRewrite") def host_rewrite(self) -> Optional[pulumi.Input[str]]: """ Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. The value must be from 1 to 255 characters. """ return pulumi.get(self, "host_rewrite") @host_rewrite.setter def host_rewrite(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host_rewrite", value) @property @pulumi.getter(name="pathPrefixRewrite") def path_prefix_rewrite(self) -> Optional[pulumi.Input[str]]: """ Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters. """ return pulumi.get(self, "path_prefix_rewrite") @path_prefix_rewrite.setter def path_prefix_rewrite(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path_prefix_rewrite", value) @pulumi.input_type class VpnGatewayVpnGatewayInterfaceArgs: def __init__(__self__, *, interconnect_attachment: Optional[pulumi.Input[str]] = None): """ A VPN gateway interface. :param pulumi.Input[str] interconnect_attachment: URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. """ if interconnect_attachment is not None: pulumi.set(__self__, "interconnect_attachment", interconnect_attachment) @property @pulumi.getter(name="interconnectAttachment") def interconnect_attachment(self) -> Optional[pulumi.Input[str]]: """ URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. """ return pulumi.get(self, "interconnect_attachment") @interconnect_attachment.setter def interconnect_attachment(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "interconnect_attachment", value) @pulumi.input_type class WeightedBackendServiceArgs: def __init__(__self__, *, backend_service: Optional[pulumi.Input[str]] = None, header_action: Optional[pulumi.Input['HttpHeaderActionArgs']] = None, weight: Optional[pulumi.Input[int]] = None): """ In contrast to a single BackendService in HttpRouteAction to which all matching traffic is directed to, WeightedBackendService allows traffic to be split across multiple backend services. The volume of traffic for each backend service is proportional to the weight specified in each WeightedBackendService :param pulumi.Input[str] backend_service: The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backendService. 
headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] weight: Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. The value must be from 0 to 1000. """ if backend_service is not None: pulumi.set(__self__, "backend_service", backend_service) if header_action is not None: pulumi.set(__self__, "header_action", header_action) if weight is not None: pulumi.set(__self__, "weight", weight) @property @pulumi.getter(name="backendService") def backend_service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. """ return pulumi.get(self, "backend_service") @backend_service.setter def backend_service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "backend_service", value) @property @pulumi.getter(name="headerAction") def header_action(self) -> Optional[pulumi.Input['HttpHeaderActionArgs']]: """ Specifies changes to request and response headers that need to take effect for the selected backendService. headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "header_action") @header_action.setter def header_action(self, value: Optional[pulumi.Input['HttpHeaderActionArgs']]): pulumi.set(self, "header_action", value) @property @pulumi.getter def weight(self) -> Optional[pulumi.Input[int]]: """ Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. The value must be from 0 to 1000. """ return pulumi.get(self, "weight") @weight.setter def weight(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "weight", value)
59.43734
1,975
0.711867
[ "Apache-2.0" ]
AaronFriel/pulumi-google-native
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
882,169
Python
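The record above is auto-generated Pulumi input-type plumbing. As a hedged sketch of how these args classes compose in a user program (the module re-export path is an assumption based on the usual Pulumi codegen layout, not shown in this record):

# Hedged sketch: assumes the input types above are re-exported from the
# compute.alpha module of this SDK, as is standard for Pulumi codegen.
import pulumi_google_native.compute.alpha as compute_alpha

url_map_test = compute_alpha.UrlMapTestArgs(
    description="rewrite smoke test",
    host="example.com",
    path="/old/prefix/item",
    headers=[compute_alpha.UrlMapTestHeaderArgs(name="X-Env", value="staging")],
)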
import sys
import matplotlib.pyplot as plt
import os

root_path = os.path.dirname(os.path.abspath('__file__'))
sys.path.append(root_path)
from tools.models import one_step_esvr, one_step_esvr_multi_seed
from Xianyang_dwt.projects.variables import variables

if __name__ == '__main__':
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_traindev_test',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_train_val',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_traindev_append',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    for leading_time in [1, 3, 5, 7, 9]:
        one_step_esvr_multi_seed(
            root_path=root_path,
            station='Xianyang',
            decomposer='dwt',
            predict_pattern='one_step_' + str(leading_time) + '_ahead_forecast_pacf',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
            n_calls=100,
        )
    for leading_time in [1, 3, 5, 7, 9]:
        one_step_esvr_multi_seed(
            root_path=root_path,
            station='Xianyang',
            decomposer='dwt',
            predict_pattern='one_step_' + str(leading_time) + '_ahead_forecast_pcc_local',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
            n_calls=100,
        )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_pca28',  # +str(i),  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_pcamle',  # +str(i),  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    num_in_one = sum(variables['lags_dict']['db10-2'].values())
    for n_components in range(num_in_one - 16, num_in_one + 1):
        one_step_esvr_multi_seed(
            root_path=root_path,
            station='Xianyang',
            decomposer='dwt',
            predict_pattern='one_step_1_ahead_forecast_pacf_pca' + str(n_components),  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
            n_calls=100,
        )
40.125
158
0.688474
[ "MIT" ]
zjy8006/MonthlyRunoffForecastByAutoReg
Xianyang_dwt/projects/esvr_one_step.py
2,889
Python
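A note on the pattern strings swept above: the lead time and the feature-selection method are encoded in the name. A tiny, hypothetical illustration of that naming scheme (this helper does not exist in the repository):

def pattern_name(leading_time, selector='pacf'):
    # e.g. pattern_name(3) -> 'one_step_3_ahead_forecast_pacf'
    return 'one_step_{}_ahead_forecast_{}'.format(leading_time, selector)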
import objc
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper


class StatusBar(NSObject):

    def setFront(self):
        NSRunningApplication.currentApplication_().activateWithOptions_(NSApplicationActivateIgnoringOtherApps)

    def applicationDidFinishLaunching_(self, notification):
        self.statusbar = NSStatusBar.systemStatusBar()
        self.statusitem = self.statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        self.image = NSImage.alloc().initByReferencingFile_('Logo.png')
        self.statusitem.setImage_(self.image)
        self.statusitem.setHighlightMode_(1)
        self.statusitem.setToolTip_('pyRotateDisplay')

        self.menu = NSMenu.alloc().init()
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Settings', 'settings:', '')
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'terminate:', '')
        self.menu.addItem_(menuitem)
        self.statusitem.setMenu_(self.menu)

    def applicationWillTerminate_(self, notification):
        self.quitCallBack()

    def setCallBack(self, cb):
        self.callback = cb

    def setQuitCallBack(self, cb):
        self.quitCallBack = cb

    def settings_(self, notification):
        self.callback()


if __name__ == "__main__":
    app = NSApplication.sharedApplication()
    delegate = StatusBar.alloc().init()
    app.setDelegate_(delegate)
    AppHelper.runEventLoop()
35.857143
111
0.719788
[ "MIT" ]
Licht-T/pyRotateDisplayForMac
pyRotateDisplayStatusBar.py
1,506
Python
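Minimal sketch of wiring the delegate's callbacks before starting the event loop; the callables here are placeholders (the original project registers them from elsewhere), and forgetting to register them makes settings_() raise AttributeError:

def on_settings():
    print('settings clicked')

def on_quit():
    print('cleaning up before exit')

delegate = StatusBar.alloc().init()
delegate.setCallBack(on_settings)    # invoked by settings_()
delegate.setQuitCallBack(on_quit)    # invoked by applicationWillTerminate_()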
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test operator_utils."""

from mindquantum.core.operators import (
    FermionOperator,
    QubitExcitationOperator,
    QubitOperator,
)
from mindquantum.core.operators.utils import (
    commutator,
    count_qubits,
    down_index,
    hermitian_conjugated,
    normal_ordered,
    number_operator,
    up_index,
)


def test_count_qubits():
    """Test count_qubits"""
    qubit_op = QubitOperator("X1 Y2")
    assert count_qubits(qubit_op) == 3

    fer_op = FermionOperator("1^")
    assert count_qubits(fer_op) == 2

    qubit_exc_op = QubitExcitationOperator("4^ 1")
    assert count_qubits(qubit_exc_op) == 5


def test_normal_ordered():
    """Test normal_ordered function"""
    op = FermionOperator("3 4^")
    assert str(normal_ordered(op)) == '-1 [4^ 3] '


def test_commutator():
    """Test commutator"""
    qub_op1 = QubitOperator("X1 Y2")
    qub_op2 = QubitOperator("X1 Z2")
    qub_op3 = 2j * QubitOperator("X2")
    assert commutator(qub_op1, qub_op2) == qub_op3

    assert commutator(qub_op1, qub_op1) == QubitOperator()

    qubit_exc_op1 = QubitExcitationOperator(((4, 1), (1, 0)), 2.0j)
    qubit_exc_op2 = QubitExcitationOperator(((3, 1), (2, 0)), 2.0j)
    qubit_exc_op3 = QubitExcitationOperator("3^ 2 4^ 1", 4.0) + QubitExcitationOperator("4^ 1 3^ 2", -4.0)
    assert commutator(qubit_exc_op1, qubit_exc_op2).compress() == qubit_exc_op3

    assert commutator(qubit_exc_op1, qubit_exc_op1) == QubitExcitationOperator()


def test_number_operator():
    """Test number operator"""
    nmode = 3
    # other parameters by default
    check_str = '1 [0^ 0] +\n1 [1^ 1] +\n1 [2^ 2] '
    assert str(number_operator(nmode)) == check_str

    check_str2 = '1 [3^ 3] '
    assert str(number_operator(None, nmode)) == check_str2


def test_up_index():
    """This is for labelling the spin-orbital index with spin alpha"""
    alpha = 2
    assert up_index(alpha) == 4


def test_down_index():
    """This is for labelling the spin-orbital index with spin beta"""
    beta = 1
    assert down_index(beta) == 3


def test_hermitian_conjugated():
    """Test hermitian_conjugated for the QubitOperator and Fermion Operator"""
    qub_op1 = -1j * QubitOperator("X1 Y2") + QubitOperator("X1")
    qub_op2 = 1j * QubitOperator("X1 Y2") + QubitOperator("X1")
    assert hermitian_conjugated(qub_op1) == qub_op2

    fer_op1 = FermionOperator("1^ 2")
    fer_op2 = FermionOperator("2^ 1")
    assert hermitian_conjugated(fer_op1) == fer_op2

    qubit_exc_op1 = QubitExcitationOperator(((4, 1), (1, 0)), 2.0j).normal_ordered()
    qubit_exc_op2 = QubitExcitationOperator(((4, 0), (1, 1)), -2.0j).normal_ordered()
    assert hermitian_conjugated(qubit_exc_op1) == qubit_exc_op2
31.962264
106
0.677391
[ "Apache-2.0" ]
Takishima/mindquantum
tests/st/test_core/test_operators/test_operators_utils.py
3,388
Python
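The identities exercised by those tests can be reproduced interactively; the expected values in the comments are exactly what the assertions above check:

from mindquantum.core.operators import FermionOperator, QubitOperator
from mindquantum.core.operators.utils import commutator, normal_ordered

print(normal_ordered(FermionOperator("3 4^")))                     # -1 [4^ 3]
print(commutator(QubitOperator("X1 Y2"), QubitOperator("X1 Z2")))  # equals 2j * QubitOperator("X2")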
import os import soft_renderer.functional as srf import torch, random import numpy as np import tqdm from haven import haven_utils as hu from PIL import Image, ImageOps, ImageFilter import torchvision.transforms as transforms class_ids_map = { '02691156': 'Airplane', '02828884': 'Bench', '02933112': 'Cabinet', '02958343': 'Car', '03001627': 'Chair', '03211117': 'Display', '03636649': 'Lamp', '03691459': 'Loudspeaker', '04090263': 'Rifle', '04256520': 'Sofa', '04379243': 'Table', '04401088': 'Telephone', '04530566': 'Watercraft', } CLASS_IDS = sorted(list(class_ids_map.keys())) class ShapeNet(object): def __init__(self, directory=None, split=None, exp_dict=None): self.class_ids = CLASS_IDS n_classes = exp_dict.get('n_classes') if n_classes: self.class_ids = CLASS_IDS[:n_classes] classes = exp_dict.get('classes') if classes: classes_map = {key: value for (value, key) in class_ids_map.items()} self.class_ids = sorted([classes_map[k] for k in classes]) self.split = split self.elevation = 30. self.distance = 2.732 self.exp_dict = exp_dict self.class_ids_map = class_ids_map self.images = [] self.voxels = [] self.labels = [] self.class_ids_pair = list(zip(self.class_ids, [self.class_ids_map[i] for i in self.class_ids])) self.num_data = {} self.pos = {} count = 0 # ind2class = {key: value for (value, key) in enumerate(self.class_ids)} loop = tqdm.tqdm(self.class_ids) loop.set_description(f'Loading {split} Dataset') n_train_objects = exp_dict.get('n_train_objects') n_ratio_val = exp_dict.get('n_val_ratio') # assert n_ratio_val is not None if n_train_objects is None and split == 'unlabeled': return if split in ['train', 'unlabeled']: set_name = 'train' elif split in ['val', 'test']: set_name = 'val' if n_ratio_val is None: set_name = split for ci, class_id in enumerate(loop): i = list(np.load(os.path.join(directory, '%s_%s_images.npz' % (class_id, set_name))).items())[0][1] v = list(np.load(os.path.join(directory, '%s_%s_voxels.npz' % (class_id, set_name))).items())[0][1] # train get only first n if split == 'train' and n_train_objects is not None: n = n_train_objects i = i[:n] v = v[:n] # unlabeled get only first n if split == 'unlabeled' and n_train_objects is not None: n = n_train_objects i = i[n:] v = v[n:] elif split == 'val' and n_ratio_val is not None: n = int(i.shape[0]*n_ratio_val) i = i[:n] v = v[:n] elif split == 'test' and n_ratio_val is not None: n = int(i.shape[0]*n_ratio_val) i = i[n:] v = v[n:] self.images += [i] self.voxels += [v] self.labels += [torch.ones(i.shape[0]) * ci] self.images = np.concatenate(self.images, axis=0) self.images = torch.from_numpy(self.images.astype('float32') / 255.) 
self.voxels = np.concatenate(self.voxels, axis=0) self.voxels = torch.from_numpy(self.voxels.astype('float32')) self.labels = torch.cat(self.labels, dim=0) # positible view points distances = torch.ones(24).float() * self.distance elevations = torch.ones(24).float() * self.elevation self.possible_viewpoints = srf.get_points_from_angles(distances, elevations, -torch.arange(24) * 15) print(f'{split} samples: {len(self)}') def __len__(self): if isinstance(self.images, list): return len(self.images) return self.images.shape[0] def __getitem__(self, idx, vp_idx=None, vp_idx_b=None): # image A images_a, viewpoints_a, viewpoint_id_a = self.get_random_viewpoint(idx, vp_idx) # image B images_b, viewpoints_b, viewpoint_id_b = self.get_random_viewpoint(idx, vp_idx_b) return {'images_a':images_a, 'viewpoints_a': viewpoints_a, 'object_id_a':idx, 'viewpoint_id_a':viewpoint_id_a, 'images_b':images_b, 'viewpoints_b': viewpoints_b, 'object_id_b':idx, 'viewpoint_id_b':viewpoint_id_b} def insert_images(self, images): self.images = torch.cat([self.images, images], dim=0) def pop_indices(self, ind_list): selected_images = self.images[ind_list] keep_idx = np.delete(np.arange(self.images.shape[0]), ind_list) self.images = self.images[keep_idx] # return list(np.delete(arr, id_to_del)) return selected_images def get_random_viewpoint(self, idx, vp_idx=None): if vp_idx is None: viewpoint_id = np.random.randint(0, 24) else: viewpoint_id = vp_idx # get image and viewpoint images = self.images[idx][viewpoint_id] # get viewpoint viewpoints = srf.get_points_from_angles(self.distance, self.elevation, -viewpoint_id * 15) return images, torch.as_tensor(viewpoints), viewpoint_id def get_all_batches_for_evaluation(self, batch_size, class_id): assert self.images.shape[0] == self.voxels.shape[0] ci = self.class_ids.index(class_id) ind_ci = self.labels == ci im_cls = self.images[ind_ci] vx_cls = self.voxels[ind_ci] data_ids = np.arange(im_cls.shape[0]) viewpoint_ids = np.tile(np.arange(24), data_ids.size) data_ids = np.repeat(data_ids, 24) * 24 + viewpoint_ids distances = torch.ones(data_ids.size).float() * self.distance elevations = torch.ones(data_ids.size).float() * self.elevation viewpoints_all = srf.get_points_from_angles(distances, elevations, -torch.from_numpy(viewpoint_ids).float() * 15) shape = im_cls.shape[-3:] images = im_cls.view(-1, *shape) shape = vx_cls.shape[-3:] voxels = vx_cls.view(-1, *shape) for i in range((data_ids.size - 1) // batch_size + 1): im = images[data_ids[i * batch_size:(i + 1) * batch_size]] vx = voxels[data_ids[i * batch_size:(i + 1) * batch_size] // 24] yield im, vx class Transform: def __init__(self): self.transform = transforms.Compose([ transforms.ToPILImage(), transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC), transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply( [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)], p=0.8 ), transforms.RandomGrayscale(p=0.2), GaussianBlur(p=1.0), Solarization(p=0.0), transforms.ToTensor(), # transforms.Normalize(mean=[0.485, 0.456, 0.406], # std=[0.229, 0.224, 0.225]) ]) self.transform_prime = transforms.Compose([ transforms.ToPILImage(), transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC), transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply( [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)], p=0.8 ), transforms.RandomGrayscale(p=0.2), GaussianBlur(p=0.1), Solarization(p=0.2), transforms.ToTensor(), # transforms.Normalize(mean=[0.485, 0.456, 0.406], # 
std=[0.229, 0.224, 0.225]) ]) def __call__(self, x): y1 = self.transform(x) y2 = self.transform_prime(x) return y1, y2 class GaussianBlur(object): def __init__(self, p): self.p = p def __call__(self, img): if random.random() < self.p: sigma = random.random() * 1.9 + 0.1 return img.filter(ImageFilter.GaussianBlur(sigma)) else: return img class Solarization(object): def __init__(self, p): self.p = p def __call__(self, img): if random.random() < self.p: return ImageOps.solarize(img) else: return img
34.557312
121
0.566396
[ "Apache-2.0" ]
IssamLaradji/SSR
src/datasets.py
8,743
Python
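Hedged usage sketch for the ShapeNet loader above; the directory layout and exp_dict keys are assumptions inferred from the class itself (it reads them via exp_dict.get), not documented in this file:

exp_dict = {'n_classes': 2, 'n_val_ratio': 0.5}   # hypothetical experiment config
train_set = ShapeNet(directory='/data/shapenet_npz', split='train', exp_dict=exp_dict)
sample = train_set[0]                             # two random views of object 0
print(sample['images_a'].shape, sample['viewpoint_id_a'], sample['viewpoint_id_b'])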
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to calculate sleet probability."""

from improver import cli


@cli.clizefy
@cli.with_output
def process(snow: cli.inputcube, rain: cli.inputcube):
    """Calculate sleet probability.

    Calculates the sleet probability using the
    calculate_sleet_probability plugin.

    Args:
        snow (iris.cube.Cube):
            An iris Cube of the probability of snow.
        rain (iris.cube.Cube):
            An iris Cube of the probability of rain.

    Returns:
        iris.cube.Cube:
            Returns a cube with the probability of sleet.
    """
    from improver.calculate_sleet_prob import calculate_sleet_probability

    result = calculate_sleet_probability(snow, rain)
    return result
39.933333
79
0.724958
[ "BSD-3-Clause" ]
BelligerG/improver
improver/cli/sleet_probability.py
2,396
Python
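The CLI above only wires cubes into the plugin. As a hedged NumPy sketch of the arithmetic one would expect it to perform (an assumption about the plugin, which operates on iris cubes and is not shown in this record), sleet is treated as the residual probability:

import numpy as np

prob_snow = np.array([0.2, 0.5])
prob_rain = np.array([0.3, 0.1])
prob_sleet = 1.0 - (prob_snow + prob_rain)   # assumed residual-probability formula
print(prob_sleet)                            # [0.5 0.4]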
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import numpy as np

from typing import Dict, Tuple

from tensortrade.base import Identifiable
from tensortrade.base.exceptions import InsufficientFundsForAllocation
from tensortrade.instruments import Quantity


class Wallet(Identifiable):
    """A wallet stores the balance of a specific instrument on a specific exchange."""

    def __init__(self, exchange: 'Exchange', quantity: 'Quantity'):
        self._exchange = exchange
        self._instrument = quantity.instrument
        self._balance = quantity
        self._locked = {}

    @classmethod
    def from_tuple(cls, wallet_tuple: Tuple['Exchange', 'Instrument', float]):
        exchange, instrument, balance = wallet_tuple
        return cls(exchange, Quantity(instrument, balance))

    @property
    def exchange(self) -> 'Exchange':
        return self._exchange

    @exchange.setter
    def exchange(self, exchange: 'Exchange'):
        raise ValueError("You cannot change a Wallet's Exchange after initialization.")

    @property
    def instrument(self) -> 'Instrument':
        return self._instrument

    @instrument.setter
    def instrument(self, instrument: 'Exchange'):
        raise ValueError("You cannot change a Wallet's Instrument after initialization.")

    @property
    def balance(self) -> 'Quantity':
        """The total balance of the wallet available for use."""
        return self._balance

    @balance.setter
    def balance(self, balance: 'Quantity'):
        self._balance = balance

    @property
    def locked_balance(self) -> 'Quantity':
        """The total balance of the wallet locked in orders."""
        locked_balance = Quantity(self.instrument, 0)

        for quantity in self.locked.values():
            locked_balance += quantity.size

        return locked_balance

    @property
    def total_balance(self) -> 'Quantity':
        """The total balance of the wallet, both available for use and locked in orders."""
        total_balance = self._balance

        for quantity in self.locked.values():
            total_balance += quantity.size

        return total_balance

    @property
    def locked(self) -> Dict[str, 'Quantity']:
        return self._locked

    def deallocate(self, path_id: str):
        if path_id in self.locked.keys():
            quantity = self.locked.pop(path_id, None)

            if quantity is not None:
                self += quantity.size * self.instrument

    def __iadd__(self, quantity: 'Quantity') -> 'Wallet':
        if quantity.is_locked:
            if quantity.path_id not in self.locked.keys():
                self._locked[quantity.path_id] = quantity
            else:
                self._locked[quantity.path_id] += quantity
        else:
            self._balance += quantity

        return self

    def __isub__(self, quantity: 'Quantity') -> 'Wallet':
        if quantity.is_locked and self.locked[quantity.path_id]:
            if quantity > self.locked[quantity.path_id]:
                raise InsufficientFundsForAllocation(self.locked[quantity.path_id], quantity.size)
            self._locked[quantity.path_id] -= quantity

        elif not quantity.is_locked:
            if quantity > self._balance:
                raise InsufficientFundsForAllocation(self.balance, quantity.size)
            self._balance -= quantity

        return self

    def __str__(self):
        return '<Wallet: balance={}, locked={}>'.format(self.balance, self.locked_balance)

    def __repr__(self):
        return str(self)
33.121951
98
0.665194
[ "Apache-2.0" ]
Kukunin/tensortrade
tensortrade/wallets/wallet.py
4,074
Python
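Sketch of the balance bookkeeping implemented above; `USD` is assumed importable from tensortrade.instruments as in this library, and `exchange` is assumed to be an existing Exchange instance:

from tensortrade.instruments import Quantity, USD

wallet = Wallet(exchange, Quantity(USD, 10000))   # `exchange` assumed to exist
wallet += Quantity(USD, 500)                      # unlocked quantity: credits _balance
wallet -= Quantity(USD, 200)                      # unlocked quantity: debits _balance
print(wallet.total_balance)                       # free balance plus any locked funds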
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('mig_main', '0003_officerposition_position_type'),
    ]

    operations = [
        migrations.CreateModel(
            name='Committee',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=128)),
                ('description', models.TextField()),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
27.259259
114
0.559783
[ "Apache-2.0" ]
atish3/mig-website
mig_main/migrations/0004_committee.py
736
Python
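For reference, the models.py definition this migration creates corresponds to:

from django.db import models

class Committee(models.Model):
    name = models.CharField(max_length=128)
    description = models.TextField()
    is_active = models.BooleanField(default=True)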
from abc import ABC, abstractmethod

import slippy.core as core
from numbers import Number
from slippy.contact._step_utils import make_interpolation_func


class _TransientSubModelABC(core._SubModelABC, ABC):
    def __init__(self, name, requires, provides, transient_values, transient_names, interpolation_mode):
        self.updated_dict = dict()
        self.update_funcs = dict()
        for key, value in zip(transient_names, transient_values):
            if isinstance(value, Number):
                self.updated_dict[key] = value
            else:
                self.updated_dict[key] = None
                self.update_funcs[key] = make_interpolation_func(value, interpolation_mode, key)
        super().__init__(name, requires, set(list(provides) + list(transient_names)))

    def update_transience(self, time):
        relative_time = (time - self.model.current_step_start_time) / self.model.current_step.max_time
        for key, value in self.update_funcs.items():
            self.updated_dict[key] = float(self.update_funcs[key](relative_time))

    def solve(self, current_state: dict) -> dict:
        self.update_transience(current_state['time'])
        rtn_dict = self._solve(current_state, **self.updated_dict)
        rtn_dict.update(self.updated_dict)
        return rtn_dict

    @abstractmethod
    def _solve(self, current_state: dict, **kwargs) -> dict:
        pass
39.971429
104
0.689778
[ "MIT" ]
FrictionTribologyEnigma/slippy
slippy/contact/sub_models/_TransientSubModelABC.py
1,399
Python
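A hypothetical concrete subclass (not part of the library) illustrating the contract: interpolated transient values arrive in `_solve` as keyword arguments and are merged back into the returned state:

class ConstantLoadSubModel(_TransientSubModelABC):
    """Toy sub-model applying a (possibly time-varying) load."""

    def __init__(self, name, load):
        super().__init__(name, requires={'time'}, provides=set(),
                         transient_values=[load], transient_names=['load'],
                         interpolation_mode='linear')

    def _solve(self, current_state: dict, load=None) -> dict:
        # `load` is already interpolated to the current relative time
        return {'applied_load': load}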
# -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <[email protected]>
# License: BSD

"""
Toolset working with yahoo finance data

This module includes functions for easy access to YahooFinance data

Functions
----------
- `getHistoricData`    get historic data for a single symbol
- `getQuote`           get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file

Classes
---------
- `HistData` a class for working with multiple symbols

"""

from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os

from extra import ProgressBar


def parseStr(s):
    ''' convert string to a float or string '''
    f = s.strip()
    if f[0] == '"':
        return f.strip('"')
    elif f == 'N/A':
        return np.nan
    else:
        try:  # try float conversion
            prefixes = {'M': 1e6, 'B': 1e9}
            prefix = f[-1]
            if prefix in prefixes:  # do we have a Billion/Million character?
                return float(f[:-1]) * prefixes[prefix]
            else:  # no, convert to float directly
                return float(f)
        except ValueError:  # failed, return original string
            return s


class HistData(object):
    ''' a class for working with yahoo finance data '''

    def __init__(self, autoAdjust=True):
        self.startDate = (2008, 1, 1)
        self.autoAdjust = autoAdjust
        self.wp = WidePanel()

    def load(self, dataFile):
        """load data from HDF"""
        if os.path.exists(dataFile):
            store = HDFStore(dataFile)
            symbols = [str(s).strip('/') for s in store.keys()]
            data = dict(zip(symbols, [store[symbol] for symbol in symbols]))
            self.wp = WidePanel(data)
            store.close()
        else:
            raise IOError('Data file does not exist')

    def save(self, dataFile):
        """ save data to HDF"""
        print 'Saving data to', dataFile
        store = HDFStore(dataFile)
        for symbol in self.wp.items:
            store[symbol] = self.wp[symbol]
        store.close()

    def downloadData(self, symbols='all'):
        ''' get data from yahoo '''
        if symbols == 'all':
            symbols = self.symbols

        # store = HDFStore(self.dataFile)
        p = ProgressBar(len(symbols))

        for idx, symbol in enumerate(symbols):
            try:
                df = getHistoricData(symbol, self.startDate, verbose=False)
                if self.autoAdjust:
                    df = _adjust(df, removeOrig=True)

                if len(self.symbols) == 0:
                    self.wp = WidePanel({symbol: df})
                else:
                    self.wp[symbol] = df
            except Exception, e:
                print e
            p.animate(idx + 1)

    def getDataFrame(self, field='close'):
        ''' return a slice on wide panel for a given field '''
        return self.wp.minor_xs(field)

    @property
    def symbols(self):
        return self.wp.items.tolist()

    def __repr__(self):
        return str(self.wp)


def getQuote(symbols):
    ''' get current yahoo quote, return a DataFrame '''
    # for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
    if not isinstance(symbols, list):
        symbols = [symbols]
    header = ['symbol', 'last', 'change_pct', 'PE', 'time', 'short_ratio', 'prev_close', 'eps', 'market_cap']
    request = str.join('', ['s', 'l1', 'p2', 'r', 't1', 's7', 'p', 'e', 'j1'])

    data = dict(zip(header, [[] for i in range(len(header))]))

    urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+', symbols), request)

    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        s = "Failed to download:\n{0}".format(e)
        print s

    for line in lines:
        fields = line.strip().split(',')
        # print fields, len(fields)
        for i, field in enumerate(fields):
            data[header[i]].append(parseStr(field))

    idx = data.pop('symbol')

    return DataFrame(data, index=idx)


def _historicDataUrl(symbol, sDate=(1990, 1, 1), eDate=date.today().timetuple()[0:3]):
    """
    generate url

    symbol: Yahoo finance symbol
    sDate: start date (y,m,d)
    eDate: end date (y,m,d)
    """
    urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
        format(symbol.upper(), sDate[1] - 1, sDate[2], sDate[0], eDate[1] - 1, eDate[2], eDate[0])

    return urlStr


def getHistoricData(symbol, sDate=(1990, 1, 1), eDate=date.today().timetuple()[0:3], verbose=True):
    """
    get data from Yahoo finance and return pandas dataframe

    symbol: Yahoo finance symbol
    sDate: start date (y,m,d)
    eDate: end date (y,m,d)
    """

    urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
        format(symbol.upper(), sDate[1] - 1, sDate[2], sDate[0], eDate[1] - 1, eDate[2], eDate[0])

    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        s = "Failed to download:\n{0}".format(e)
        print s

    dates = []
    data = [[] for i in range(6)]
    # header : Date,Open,High,Low,Close,Volume,Adj Close
    for line in lines[1:]:
        # print line
        fields = line.rstrip().split(',')
        dates.append(datetime.strptime(fields[0], '%Y-%m-%d'))
        for i, field in enumerate(fields[1:]):
            data[i].append(float(field))

    idx = Index(dates)
    data = dict(zip(['open', 'high', 'low', 'close', 'volume', 'adj_close'], data))

    # create a pandas dataframe structure
    df = DataFrame(data, index=idx).sort()

    if verbose:
        print 'Got %i days of data' % len(df)

    return df


def _adjust(df, removeOrig=False):
    ''' adjust historic data based on adj_close field '''
    c = df['close'] / df['adj_close']

    df['adj_open'] = df['open'] / c
    df['adj_high'] = df['high'] / c
    df['adj_low'] = df['low'] / c

    if removeOrig:
        df = df.drop(['open', 'close', 'high', 'low'], axis=1)
        renames = dict(zip(['adj_open', 'adj_close', 'adj_high', 'adj_low'], ['open', 'close', 'high', 'low']))
        df = df.rename(columns=renames)

    return df


def getScreenerSymbols(fileName):
    ''' read symbols from a .csv saved by yahoo stock screener '''
    with open(fileName, 'r') as fid:
        lines = fid.readlines()

    symbols = []
    for line in lines[3:]:
        fields = line.strip().split(',')
        field = fields[0].strip()
        if len(field) > 0:
            symbols.append(field)
    return symbols
29.485714
120
0.525748
[ "BSD-3-Clause" ]
zhuoqiang/trading-with-python
lib/yahooFinance.py
7,224
Python
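Python 2 usage sketch for the module above; the ichart/finance.yahoo.com CSV endpoints it targets are long defunct, so this is illustrative only:

h = HistData()
h.startDate = (2010, 1, 1)
h.downloadData(['SPY', 'QQQ'])
close = h.getDataFrame('close')   # wide-panel slice of adjusted closing prices
h.save('data.h5')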
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance

"""Yahoo! Finance market data downloader (+fix for Pandas Datareader)"""

from setuptools import setup, find_packages
# from codecs import open
import io
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with io.open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='yfinance',
    version="0.1.46",
    description='Yahoo! Finance market data downloader',
    long_description=long_description,
    url='https://github.com/ranaroussi/yfinance',
    author='Ran Aroussi',
    author_email='[email protected]',
    license='Apache',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        # 'Development Status :: 3 - Alpha',
        # 'Development Status :: 4 - Beta',
        'Development Status :: 5 - Production/Stable',

        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: Office/Business :: Financial',
        'Topic :: Office/Business :: Financial :: Investment',
        'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',

        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    platforms=['any'],
    keywords='pandas, yahoo finance, pandas datareader',
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
    install_requires=['pandas>=0.24', 'numpy>=1.15',
                      'requests>=2.20', 'multitasking>=0.0.7'],
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
)
34
82
0.630665
[ "Apache-2.0" ]
Ryan-Qiyu-Jiang/yfinance
setup.py
2,074
Python
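Once the package above is installed (`pip install yfinance`), typical downstream usage is:

import yfinance as yf

data = yf.download('MSFT', start='2019-01-01', end='2019-06-01')
print(data.tail())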
""" render_fmo.py renders obj file to rgb image with fmo model Aviable function: - clear_mash: delete all the mesh in the secene - scene_setting_init: set scene configurations - node_setting_init: set node configurations - render: render rgb image for one obj file and one viewpoint - render_obj: wrapper function for render() render - init_all: a wrapper function, initialize all configurations = set_image_path: reset defualt image output folder author baiyu modified by rozumden """ import sys import os import random import pickle import bpy import glob import numpy as np from mathutils import Vector from mathutils import Euler import cv2 from PIL import Image from skimage.draw import line_aa from scipy import signal from skimage.measure import regionprops # import moviepy.editor as mpy from array2gif import write_gif abs_path = os.path.abspath(__file__) sys.path.append(os.path.dirname(abs_path)) from render_helper import * from settings import * import settings import pdb def renderTraj(pars, H): ## Input: pars is either 2x2 (line) or 2x3 (parabola) if pars.shape[1] == 2: pars = np.concatenate( (pars, np.zeros((2,1))),1) ns = 2 else: ns = 5 ns = np.max([2, ns]) rangeint = np.linspace(0,1,ns) for timeinst in range(rangeint.shape[0]-1): ti0 = rangeint[timeinst] ti1 = rangeint[timeinst+1] start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0) end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1) start = np.round(start).astype(np.int32) end = np.round(end).astype(np.int32) rr, cc, val = line_aa(start[0], start[1], end[0], end[1]) valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0)) rr = rr[valid] cc = cc[valid] val = val[valid] if len(H.shape) > 2: H[rr, cc, 0] = 0 H[rr, cc, 1] = 0 H[rr, cc, 2] = val else: H[rr, cc] = val return H def open_log(temp_folder = g_temp): # redirect output to log file logfile = os.path.join(temp_folder,'blender_render.log') try: os.remove(logfile) except OSError: pass open(logfile, 'a').close() old = os.dup(1) sys.stdout.flush() os.close(1) os.open(logfile, os.O_WRONLY) return old def close_log(old): # disable output redirection os.close(1) os.dup(old) os.close(old) def clear_mesh(): """ clear all meshes in the secene """ bpy.ops.object.select_all(action='DESELECT') for obj in bpy.data.objects: if obj.type == 'MESH': obj.select = True bpy.ops.object.delete() for block in bpy.data.meshes: if block.users == 0: bpy.data.meshes.remove(block) for block in bpy.data.materials: if block.users == 0: bpy.data.materials.remove(block) for block in bpy.data.textures: if block.users == 0: bpy.data.textures.remove(block) for block in bpy.data.images: if block.users == 0: bpy.data.images.remove(block) def scene_setting_init(use_gpu): """initialize blender setting configurations """ sce = bpy.context.scene.name bpy.data.scenes[sce].render.engine = g_engine_type bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent #output bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension if g_ambient_light: world = bpy.data.worlds['World'] world.use_nodes = True bg = world.node_tree.nodes['Background'] bg.inputs[0].default_value[:3] = g_bg_color bg.inputs[1].default_value = 1.0 #dimensions 
bpy.data.scenes[sce].render.resolution_x = g_resolution_x bpy.data.scenes[sce].render.resolution_y = g_resolution_y bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage if use_gpu: bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices) print('Number of devices {}'.format(ndev)) for ki in range(2,ndev): bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA' # bpy.types.CyclesRenderSettings.device = 'GPU' bpy.data.scenes[sce].cycles.device = 'GPU' def node_setting_init(): bpy.context.scene.use_nodes = True tree = bpy.context.scene.node_tree links = tree.links for node in tree.nodes: tree.nodes.remove(node) render_layer_node = tree.nodes.new('CompositorNodeRLayers') image_output_node = tree.nodes.new('CompositorNodeOutputFile') image_output_node.base_path = g_syn_rgb_folder links.new(render_layer_node.outputs[0], image_output_node.inputs[0]) # image_output_node = bpy.context.scene.node_tree.nodes[1] image_output_node.base_path = g_temp image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder # def render(obj_path, viewpoint, temp_folder): """render rbg image render a object rgb image by a given camera viewpoint and choose random image as background, only render one image at a time. Args: obj_path: a string variable indicate the obj file path viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance) """ vp = viewpoint cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance) cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt) cam_obj = bpy.data.objects['Camera'] cam_obj.location[0] = cam_location[0] cam_obj.location[1] = cam_location[1] cam_obj.location[2] = cam_location[2] cam_obj.rotation_euler[0] = cam_rot[0] cam_obj.rotation_euler[1] = cam_rot[1] cam_obj.rotation_euler[2] = cam_rot[2] if not os.path.exists(g_syn_rgb_folder): os.mkdir(g_syn_rgb_folder) obj = bpy.data.objects['model_normalized'] ni = g_fmo_steps maxlen = 0.5 maxrot = 1.57/6 tri = 0 # rot_base = np.array([math.pi/2,0,0]) while tri <= g_max_trials: do_repeat = False tri += 1 if not g_apply_texture: for oi in range(len(bpy.data.objects)): if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP': continue for tempi in range(len(bpy.data.objects[oi].data.materials)): if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0: return True, True ## transparent object los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen))) loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi))) rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni old = open_log(temp_folder) for ki in [0, ni-1]+list(range(1,ni-1)): for oi in range(len(bpy.data.objects)): if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP': continue 
bpy.data.objects[oi].location = los_start + loc_step*ki bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki)) bpy.context.scene.frame_set(ki + 1) bpy.ops.render.render(write_still=True) #start rendering if ki == 0 or ki == (ni-1): Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0 is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0 if is_border: if ki == 0: close_log(old) return False, True ## sample different starting viewpoint else: do_repeat = True ## just sample another motion direction if do_repeat: break close_log(old) if do_repeat == False: break if do_repeat: ## sample different starting viewpoint return False, True return False, False def make_fmo(path, gt_path, video_path): n_im = 5 background_images = os.listdir(g_background_image_path) seq_name = random.choice(background_images) seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg")) if len(seq_images) <= n_im: seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png")) seq_images.sort() bgri = random.randint(n_im,len(seq_images)-1) bgr_path = seq_images[bgri] B0 = cv2.imread(bgr_path)/255 B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC) B[B > 1] = 1 B[B < 0] = 0 FH = np.zeros(B.shape) MH = np.zeros(B.shape[:2]) pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,)) centroids = np.zeros((2,g_fmo_steps)) for ki in range(g_fmo_steps): FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max props = regionprops((FM[:,:,-1,ki]>0).astype(int)) if len(props) != 1: return False centroids[:,ki] = props[0].centroid for ki in range(g_fmo_steps): F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki] M = FM[:,:,-1,ki] if ki < g_fmo_steps-1: pars[:,1] = centroids[:,ki+1] - centroids[:,ki] H = renderTraj(pars, np.zeros(B.shape[:2])) H /= H.sum()*g_fmo_steps for kk in range(3): FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same') MH += signal.fftconvolve(H, M, mode='same') Im = FH + (1 - MH)[:,:,np.newaxis]*B Im[Im > 1] = 1 Im[Im < 0] = 0 if g_skip_low_contrast: Diff = np.sum(np.abs(Im - B),2) meanval = np.mean(Diff[MH > 0.05]) print("Contrast {}".format(meanval)) if meanval < 0.2: return False if g_skip_small: sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1]) print("Size percentage {}".format(sizeper)) if sizeper < 0.05: return False Im = Im[:,:,[2,1,0]] Ims = Image.fromarray((Im * 255).astype(np.uint8)) Ims.save(path) Ball = np.zeros(B.shape+(n_im,)) Ball[:,:,:,0] = B for ki in range(1,n_im): bgrki_path = seq_images[bgri-ki] Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC) Ball[Ball > 1] = 1 Ball[Ball < 0] = 0 Bmed = np.median(Ball,3) Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png')) Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png')) # Ims.save(os.path.join(g_temp,"I.png")) # Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"FH.png")) # Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,"MH.png")) # Image.fromarray((M * 
255).astype(np.uint8)).save(os.path.join(g_temp,"M.png")) # Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"F.png")) # Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"B.png")) if False: Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:]) Fwr = (Fwr * 255).astype(np.uint8) # Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255 out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*"MJPG"), 6, (F.shape[1],F.shape[0]),True) for ki in range(g_fmo_steps): out.write(Fwr[:,:,:,ki]) out.release() return True def render_obj(obj_path, path, objid, obj_name, temp_folder): """ render one obj file by a given viewpoint list a wrapper function for render() Args: obj_path: a string variable indicate the obj file path """ vps_path = random.sample(g_view_point_file, 1)[0] vps = list(load_viewpoint(vps_path)) random.shuffle(vps) save_path = os.path.join(path,"{}_{:04d}.png".format(obj_name,objid)) gt_path = os.path.join(path,"GT","{}_{:04d}".format(obj_name,objid)) video_path = os.path.join(path,"{}_{:04d}.avi".format(obj_name,objid)) if not os.path.exists(gt_path): os.mkdir(gt_path) image_output_node = bpy.context.scene.node_tree.nodes[1] image_output_node.base_path = gt_path for imt in bpy.data.images: bpy.data.images.remove(imt) if g_apply_texture: for oi in range(len(bpy.data.objects)): if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP': continue bpy.context.scene.objects.active = bpy.data.objects[oi] # pdb.set_trace() # for m in bpy.data.materials: # bpy.data.materials.remove(m) # bpy.ops.object.material_slot_remove() bpy.ops.object.editmode_toggle() bpy.ops.uv.cube_project() bpy.ops.object.editmode_toggle() texture_images = os.listdir(g_texture_path) texture = random.choice(texture_images) tex_path = os.path.join(g_texture_path,texture) # mat = bpy.data.materials.new(texture) # mat.use_nodes = True # nt = mat.node_tree # nodes = nt.nodes # links = nt.links # # Image Texture # textureNode = nodes.new("ShaderNodeTexImage") # textureNode.image = bpy.data.images.load(tex_path) # links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color']) # mat.specular_intensity = 0 # bpy.data.objects[oi].active_material = mat # print(bpy.data.objects[oi].active_material) for mat in bpy.data.materials: nodes = mat.node_tree.nodes links = mat.node_tree.links textureNode = nodes.new("ShaderNodeTexImage") textureNode.image = bpy.data.images.load(tex_path) links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color']) # print(bpy.data.objects[oi].active_material) tri = 0 while tri <= g_max_trials: tri += 1 vp = random.sample(vps, 1)[0] sample_different_object, sample_different_vp = render(obj_path, vp, temp_folder) if sample_different_vp: if sample_different_object: print('Transparent object!') return False print('Rendering failed, repeating') continue success = make_fmo(save_path, gt_path, video_path) if success: return True print('Making FMO failed, repeating') return False def init_all(): """init everything we need for rendering an image """ scene_setting_init(g_gpu_render_enable) node_setting_init() cam_obj = bpy.data.objects['Camera'] cam_obj.rotation_mode = g_rotation_mode if g_render_light: bpy.data.objects['Lamp'].data.energy = 50 bpy.ops.object.lamp_add(type='SUN') bpy.data.objects['Sun'].data.energy = 5 ### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA init_all() argv = sys.argv argv = argv[argv.index("--") + 1:] start_index = int(argv[0]) step_index = int(argv[1]) 
print('Start index {}, step index {}'.format(start_index, step_index))
temp_folder = g_syn_rgb_folder + g_render_objs[start_index] + '/'
for obj_name in g_render_objs[start_index:(start_index + step_index)]:
    print("Processing object {}".format(obj_name))
    obj_folder = os.path.join(g_syn_rgb_folder, obj_name)
    if not os.path.exists(obj_folder):
        os.makedirs(obj_folder)
    if not os.path.exists(os.path.join(obj_folder, "GT")):
        os.mkdir(os.path.join(obj_folder, "GT"))
    num = g_shapenet_categlory_pair[obj_name]
    search_path = os.path.join(g_shapenet_path, num, '**', '*.obj')
    paths = glob.glob(search_path, recursive=True)
    random.shuffle(paths)
    objid = 1
    tri = 0
    while objid <= g_number_per_category:
        print(" instance {}".format(objid))
        clear_mesh()
        path = random.sample(paths, 1)[0]
        old = open_log(temp_folder)
        bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y',
                                 filter_glob="*.obj;*.mtl",
                                 use_split_groups=False, use_split_objects=True)
        # bpy.ops.import_scene.obj(filepath=path)
        close_log(old)
        # combine_objects()
        # scale_objects(0.5)
        result = render_obj(path, obj_folder, objid, obj_name, temp_folder)
        if result:
            objid += 1
            tri = 0
        else:
            print('Error! Rendering another object from the category!')
            tri += 1
            if tri > g_max_trials:
                print('No usable object found in this category!')
                break
39.181237
200
0.619286
[ "MIT" ]
12564985/DeFMO
renderer/render_fmo.py
18,376
Python
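The script above reads its own arguments from everything after the `--` separator, which is the usual pattern for Blender background scripts. A hedged launch sketch; the blender binary name and the two index arguments are placeholders:

    # Hypothetical launcher: renders objects [start_index, start_index + step_index)
    # inside Blender's bundled Python interpreter.
    import subprocess

    subprocess.run([
        "blender", "--background", "--python", "renderer/render_fmo.py",
        "--", "0", "5",   # start_index=0, step_index=5, parsed after "--" by the script
    ], check=True)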
import json
from urllib.parse import parse_qs
from urllib.parse import urlparse

from Cryptodome.PublicKey import RSA
from jwkest import b64e
from jwkest.jwk import RSAKey
from jwkest.jwk import load_jwks

from oic.extension.message import TokenIntrospectionResponse
from oic.extension.signed_http_req import SignedHttpRequest
from oic.oauth2 import compact
from oic.utils.jwt import JWT
from oic.utils.keyio import KeyBundle

__author__ = 'roland'


def sign_http_args(method, url, headers, body=''):
    p = urlparse(url)

    kwargs = {'path': p.path, 'host': p.netloc, 'headers': headers,
              'method': method}
    if body:
        kwargs['body'] = body

    query_params = compact(parse_qs(p.query))
    kwargs['query_params'] = query_params
    return kwargs


class PoPCallBack(object):
    def __init__(self, key, alg):
        self.key = key
        self.alg = alg

    def __call__(self, method, url, **kwargs):
        try:
            body = kwargs['body']
        except KeyError:
            body = None

        try:
            headers = kwargs['headers']
        except KeyError:
            headers = {}

        _kwargs = sign_http_args(method, url, headers, body)
        shr = SignedHttpRequest(self.key)
        kwargs['Authorization'] = 'pop {}'.format(
            shr.sign(alg=self.alg, **_kwargs))
        return kwargs


class PoPClient(object):
    def __init__(self, key_size=2048, sign_alg='RS256'):
        self.key_size = key_size
        self.state2key = {}
        self.token2key = {}
        self.alg = sign_alg

    def update(self, msg, state, key_size=0):
        """
        Used to 'update' the AccessToken Request.

        :param msg:
        :param state: Used to map the access token response to this request
        :param key_size:
        :return:
        """
        if not key_size:
            key_size = self.key_size
        key = RSAKey(key=RSA.generate(key_size))
        self.state2key[state] = key
        msg['key'] = json.dumps(key.serialize())
        return msg

    def handle_access_token_response(self, resp):
        """
        Map an access token to a keypair.

        :param resp: AccessTokenResponse instance
        """
        self.token2key[resp['access_token']] = self.state2key[resp['state']]


class PoPAS(object):
    def __init__(self, me):
        self.thumbprint2key = {}
        self.keyjar = None
        self.me = me

    def store_key(self, key):
        kb = KeyBundle()
        kb.do_keys([key])
        # Store the key with its thumbprint as the lookup key
        key_thumbprint = b64e(kb.keys()[0].thumbprint('SHA-256')).decode(
            'utf8')
        self.thumbprint2key[key_thumbprint] = key
        return key_thumbprint

    def create_access_token(self, key_thumbprint):
        # creating the access_token
        jwt_constructor = JWT(self.keyjar, iss=self.me)
        # Audience is myself
        return jwt_constructor.pack(
            kid='abc', cnf={'kid': key_thumbprint}, aud=self.me)

    def token_introspection(self, token):
        jwt_constructor = JWT(self.keyjar, iss=self.me)
        res = jwt_constructor.unpack(token)
        tir = TokenIntrospectionResponse(active=True)
        tir['key'] = json.dumps(self.thumbprint2key[res['cnf']['kid']])
        return tir


class PoPRS(object):
    def __init__(self):
        self.token2key = {}

    def store_key(self, access_token, tir):
        """
        Store the key that was returned in the token introspection response.

        :param access_token: The token that was introspected
        :param tir: TokenIntrospectionResponse instance
        """
        key = load_jwks(json.dumps({'keys': [json.loads(tir['key'])]}))
        self.token2key[access_token] = key

    def eval_signed_http_request(self, pop_token, access_token, method, url,
                                 headers, body=''):
        kwargs = sign_http_args(method, url, headers, body)

        shr = SignedHttpRequest(self.token2key[access_token][0])
        return shr.verify(signature=pop_token,
                          strict_query_params_verification=True,
                          strict_headers_verification=True, **kwargs)
29.384615
76
0.612803
[ "Apache-2.0" ]
fsschmitt/pyoidc
src/oic/extension/pop.py
4,202
Python
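A hedged sketch of the client half of the PoP flow defined above; the token endpoint exchange itself is elided, and the token value, state and URL are placeholders. The AS and RS halves follow the same pattern through PoPAS.token_introspection and PoPRS.store_key.

    # Bind a fresh RSA key to a token request, then sign a resource request with it.
    client = PoPClient()
    msg = client.update({}, state='state-1')       # adds the public key as msg['key']
    # ... send the token request; when the response arrives, map token -> key:
    client.handle_access_token_response({'access_token': 'tok', 'state': 'state-1'})
    cb = PoPCallBack(client.token2key['tok'], alg='RS256')
    kwargs = cb('GET', 'https://rs.example.com/resource')
    # kwargs['Authorization'] now holds the 'pop <signature>' value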
#!/usr/bin/python
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
from subprocess import *
from time import sleep, strftime
from datetime import datetime
from mpd import *
import threading
import signal
import sys
import os
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json

# LCD state/custom-character constants
PLAY = 0
PAUSE = 1
STOP = 2
VOL = 3
LCDon = 4

lcd = Adafruit_CharLCDPlate()  # create LCD object
client = MPDClient()           # create MPD client object
lock = threading.Lock()
home = os.path.dirname(os.path.realpath(__file__))


class pimp3clock_HTTPRequesthandler(BaseHTTPRequestHandler):

    def do_GET(self):
        try:
            q = ''
            if '?' in self.path:
                self.path, q = self.path.split('?', 1)
            if self.path.endswith((".js", ".css", ".png", ".gif", ".html")):
                f = open(home + "/web/" + self.path)
                self.send_response(200)
                if self.path.endswith(".js"):
                    self.send_header('Content-type', 'text/javascript')
                elif self.path.endswith(".css"):
                    self.send_header('Content-type', 'text/css')
                elif self.path.endswith(".png"):
                    self.send_header('Content-type', 'image/png')
                elif self.path.endswith(".gif"):
                    self.send_header('Content-type', 'image/gif')
                elif self.path.endswith(".html"):
                    self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return
            elif self.path.endswith(".json"):
                self.send_response(200)
                self.send_header('Content-type', 'text/javascript')
                self.end_headers()
                if self.path.endswith("status.json"):
                    lock.acquire()
                    song = client.currentsong()
                    status = client.status()
                    lock.release()
                    self.wfile.write(json.dumps({'song': song, 'status': status}))
                    return
                elif self.path.endswith("select.json"):
                    lock.acquire()
                    status = client.status()
                    if status['state'] == "stop":
                        client.play()
                    elif status['state'] == "play":
                        client.pause(1)
                    elif status['state'] == "pause":
                        client.pause(0)
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("next.json"):
                    lock.acquire()
                    client.next()
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("previous.json"):
                    lock.acquire()
                    client.previous()
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("volume.json"):
                    key, value = q.split('=', 1)
                    value = int(value)  # query string values arrive as strings
                    if value < 1:
                        value = 1
                    lock.acquire()
                    client.setvol(value)
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("update.json"):
                    lock.acquire()
                    mpd_update()
                    lock.release()
                    return
                elif self.path.endswith("background.json"):
                    lock.acquire()
                    key, value = q.split('=', 1)
                    LCDon = int(value)
                    lcd.backlight(LCDon)
                    lock.release()
                    return
                return
            else:
                self.send_response(301)
                self.send_header('Location', 'index.html')
                self.end_headers()
                return
        except IOError:
            self.send_error(404, 'File Not Found: {0} (Home: {1})'.format(self.path, home))

    def do_POST(self):
        try:
            print "POST"
        except:
            pass


def mpd_update():
    # Load the MPD database into the current playlist
    client.update()
    client.clear()
    database = client.listall("/")
    for i in range(len(database)):
        if 'file' in database[i]:
            client.add(database[i]['file'])
    client.random(1)
    client.shuffle(1)
    client.crossfade(2)


def display_lcd(title_a, st_a, vol_a):
    LCDoff = lcd.OFF
    LCDState = LCDoff
    LCDOffDelay = 30
    LCDOffCountdown = LCDOffDelay
    lcd.backlight(LCDon)
    lcd.clear()
    lcd.begin(16, 1)
    # Custom 5x8 glyphs for the play/pause/stop indicators
    play = [0b10000, 0b11000, 0b11100, 0b11110, 0b11100, 0b11000, 0b10000, 0b00000]
    lcd.createChar(PLAY, play)
    pause = [0b11011, 0b11011, 0b11011, 0b11011, 0b11011, 0b11011, 0b11011, 0b11011]
    lcd.createChar(PAUSE, pause)
    stop = [0b00000, 0b11111, 0b10001, 0b10001, 0b10001, 0b10001, 0b11111, 0b00000]
    lcd.createChar(STOP, stop)
    t = 0
    i = 0
    fr = 1
    oldtitle = ""
    while 1:
        lock.acquire()
        # Volume bar glyph, redrawn every cycle to match the current volume
        vol = []
        vol.append([0b00000, 0b00000, 0b00000, 0b00000, 0b00000, 0b00000, 0b00000, 0b00000])
        vol.append([0b00000, 0b00000, 0b00000, 0b00000, 0b00000, 0b00000, 0b10000, 0b10000])
        vol.append([0b00000, 0b00000, 0b00000, 0b00000, 0b00000, 0b01000, 0b11000, 0b11000])
        vol.append([0b00000, 0b00000, 0b00000, 0b00000, 0b00100, 0b01100, 0b11100, 0b11100])
        vol.append([0b00000, 0b00000, 0b00000, 0b00010, 0b00110, 0b01110, 0b11110, 0b11110])
        vol.append([0b00000, 0b00000, 0b00001, 0b00011, 0b00111, 0b01111, 0b11111, 0b11111])
        volbar = int((vol_a[0] + 5) / (100 / 5))
        lcd.createChar(VOL, vol[volbar])
        try:
            if (t % 2) == 0:
                lcd.home()
                lcd.write(VOL, True)  # special characters
                lcd.message(datetime.now().strftime('%d.%b %H:%M:%S'))
            else:
                title = title_a[0]
                if title != oldtitle:
                    fr = 1
                    i = 0
                    oldtitle = title
                st = st_a[0]
                lcd.clear()
                lcd.write(VOL, True)  # special characters
                lcd.message(datetime.now().strftime('%d.%b %H %M %S\n'))
                lcd.write(st, True)  # special characters
                lcd.message('%s' % (title[i:15 + i]))
                if (st == PAUSE) or (st == STOP):
                    LCDOffCountdown = LCDOffCountdown - 1
                else:
                    if LCDOffCountdown == 0:
                        lcd.backlight(LCDon)
                    LCDOffCountdown = LCDOffDelay
                if LCDOffCountdown < 1:
                    lcd.backlight(LCDoff)
                    LCDOffCountdown = 0
                # Scroll long titles back and forth across the 16-char display
                if fr == 1:
                    i = i + 1
                else:
                    i = i - 1
                if i > len(title) - 15:
                    fr = 0
                if i == 0:
                    fr = 1
        finally:
            lock.release()
        t = t + 1
        sleep(0.5)


def webserver():
    server.serve_forever()


def main_loop():
    i = 0
    title_a = [None]
    st_a = [None]
    vol_a = [None]
    title_a[0] = ""
    st_a[0] = STOP
    vol_a[0] = 0
    display_thread = threading.Thread(target=display_lcd, args=(title_a, st_a, vol_a))
    display_thread.daemon = True  # causes the thread to stop when the main process ends
    display_thread.start()
    webserver_thread = threading.Thread(target=webserver, args=())
    webserver_thread.daemon = True  # causes the thread to stop when the main process ends
    webserver_thread.start()
    client.connect("localhost", 6600)  # connect to localhost:6600
    mpd_update()
    last_button = 100
    while 1:
        lock.acquire()
        status = client.status()
        vol_a[0] = int(status['volume'])
        lock.release()
        if (i % 5) == 0:
            lock.acquire()
            song = client.currentsong()
            lock.release()
            if song == {}:
                title_a[0] = ""
            else:
                title_a[0] = song['artist'] + " - " + song['title']
            if status['state'] == "stop":
                st_a[0] = STOP
            elif status['state'] == "play":
                st_a[0] = PLAY
            elif status['state'] == "pause":
                st_a[0] = PAUSE
        lock.acquire()
        try:
            button = lcd.buttons()
        finally:
            lock.release()
        if ((button & 1) == 1) and (last_button != button):  # SELECT
            if status['state'] == "stop":
                lock.acquire()
                client.play()
                lock.release()
            elif status['state'] == "play":
                lock.acquire()
                client.pause(1)
                lock.release()
            elif status['state'] == "pause":
                lock.acquire()
                client.pause(0)
                lock.release()
        elif ((button & 2) == 2) and (last_button != button):  # RIGHT
            client.next()
        elif (button & 4) == 4:  # DOWN
            if int(status['volume']) > 1:
                lock.acquire()
                client.setvol(int(status['volume']) - 1)
                lock.release()
        elif (button & 8) == 8:  # UP
            if int(status['volume']) < 100:
                lock.acquire()
                client.setvol(int(status['volume']) + 1)
                lock.release()
        elif ((button & 16) == 16) and (last_button != button):  # LEFT
            lock.acquire()
            client.previous()
            lock.release()
        last_button = button
        i = i + 1
        sleep(0.1)


def shutdown():
    client.stop()
    client.close()       # send the close command
    client.disconnect()  # disconnect from the server
    lcd.clear()
    lcd.stop()


def sig_handler(signum=None, frame=None):
    shutdown()
    sys.exit(0)


try:
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
        signal.signal(sig, sig_handler)
    server = HTTPServer(('', 80), pimp3clock_HTTPRequesthandler)
    main_loop()
except (KeyboardInterrupt, SystemExit):
    shutdown()
26.398352
156
0.545218
[ "Unlicense" ]
crami/pimp3clock
pimp3clock.py
9,609
Python
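A hedged client-side sketch for the JSON endpoints the handler above serves; the hostname is a placeholder, and since the server binds port 80 it has to run with sufficient privileges:

    import json
    from urllib.request import urlopen

    # status.json returns the current MPD song and player status
    status = json.load(urlopen("http://raspberrypi/status.json"))
    print(status["status"]["state"], status["song"].get("title"))
    # volume.json only looks at the value after '='; the key name is arbitrary
    urlopen("http://raspberrypi/volume.json?vol=50")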
from drdown.careline.models.model_checkitem import CheckItem
from drdown.careline.models.model_checklist import Checklist
from drdown.careline.models.model_procedure import Procedure
45.75
60
0.885246
[ "MIT" ]
fga-eps-mds/2018.1-Dr-Down
drdown/careline/models/__init__.py
183
Python
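For illustration, these re-exports let the model classes be imported from the package root instead of the individual model_* modules:

    from drdown.careline.models import CheckItem, Checklist, Procedure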
import json
from datetime import datetime

import jsonschema
import mock
import numpy
import pytest

import utils
from aa import js

EARLY_DATE = datetime(2001, 1, 1, 1, 1)
LATE_DATE = datetime(2010, 2, 3, 4, 5)
EMPTY_ARRAY = numpy.array((0,))


@pytest.fixture
def json_fetcher():
    return js.JsonFetcher("localhost", 5000)


@pytest.mark.parametrize("filename", ["event", "string_event", "waveform"])
def test_json_matches_schema(filename):
    schema_string = utils.load_from_file("aa_schema.json")
    schema_obj = json.loads(schema_string)
    json_string = utils.load_from_file(filename + ".json")
    json_obj = json.loads(json_string)
    jsonschema.validate(json_obj, schema_obj)


def test_JsonFetcher_constructs_url_correctly(json_fetcher):
    assert json_fetcher._url == "http://localhost:5000/retrieval/data/getData.json"


def test_JsonFetcher_decodes_empty_json_correctly(dummy_pv, empty_data, json_fetcher):
    mock_response = utils.mock_response(json_str="[]")
    json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)
    aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)
    assert aa_data == empty_data


def test_JsonFetcher_decodes_single_event_correctly(dummy_pv, json_fetcher):
    event_json = utils.load_from_file("event.json")
    mock_response = utils.mock_response(json_str=event_json)
    json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)
    aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)
    assert aa_data.pv == dummy_pv
    assert aa_data.values[0] == 1.23
    assert aa_data.timestamps[0] == 1502963093.000000123
    assert aa_data.severities[0] == 1


def test_JsonFetcher_decodes_string_event_correctly(dummy_pv, json_fetcher):
    event_json = utils.load_from_file("string_event.json")
    mock_response = utils.mock_response(json_str=event_json)
    json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)
    aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)
    assert aa_data.pv == dummy_pv
    assert aa_data.values[0] == "2015-01-08 19:47:01 UTC"
    assert aa_data.timestamps[0] == 1507712433.235971000
    assert aa_data.severities[0] == 0


def test_JsonFetcher_decodes_waveform_events_correctly(
    dummy_pv, json_fetcher, data_2d_2_events
):
    waveform_json = utils.load_from_file("waveform.json")
    mock_response = utils.mock_response(json_str=waveform_json)
    json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)
    aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)
    assert aa_data == data_2d_2_events


def test_JsonFetcher_decodes_enum_events_correctly(dummy_pv, json_fetcher):
    enum_json = utils.load_from_file("enum_event.json")
    mock_response = utils.mock_response(json_str=enum_json)
    json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)
    aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)
    assert aa_data.enum_options[5] == "Special"
    assert aa_data.enum_strings[0] == "User"
    assert aa_data[0].enum_string[0] == "User"
    assert aa_data.values[0] == 4
37.228916
86
0.771521
[ "Apache-2.0" ]
philipp-leitl/aapy
tests/test_js.py
3,090
Python
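Outside the tests, the fetcher is presumably used as below; host, port and PV name are placeholders, everything else mirrors the calls exercised above:

    from datetime import datetime
    from aa import js

    fetcher = js.JsonFetcher("archiver.example.com", 5000)
    data = fetcher.get_values("SR-DI-DCCT-01:SIGNAL",
                              datetime(2021, 1, 1), datetime(2021, 1, 2))
    print(data.values[0], data.timestamps[0], data.severities[0])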
"""Backup handler This script is contains the backup handling functions. """ import os import time import pickle import shutil from shutil import ignore_patterns import pypianoroll import numpy as np def backup_pickle(experiment, stats): '''' Back up handling function. Arguments: experiment -- Experiment object, contains the initial sorn parameters stats -- bunch of stats stored during the simulation ''' params = experiment.init_params results_dir = experiment.results_dir files_tosave = experiment.files_tosave directory = ('backup/{}'.format(results_dir)) # creates a new directory for storing the results # sleeps for a short time to avoid conflicts when running in parallel time.sleep(np.random.rand()) for n_sim in range(1, 1000): final_dir = '{}_{}/'.format(directory, str(n_sim)) if not os.path.exists(final_dir): try: os.makedirs(final_dir) break except: pass if 'params' in files_tosave: with open(final_dir+'init_params.p', 'wb') as f: pickle.dump(params, f) if 'stats' in files_tosave: # generate MIDI track if MusicTask if hasattr(stats, 'track'): stats.track.write(final_dir+'sample.mid') # delete attributes that occupy a lot of memory space if hasattr(stats, 'input_index_readout'): del stats.input_index_readout if hasattr(stats, 'input_readout'): del stats.input_readout if hasattr(stats, 'raster_readout'): del stats.raster_readout if hasattr(stats, 't_past'): del stats.t_past with open(final_dir+'stats.p', 'wb') as f: pickle.dump(stats, f) if 'scripts' in files_tosave: # TODO: this should not need a '_' for f in ['utils', 'common', results_dir.split('_')[0]]: shutil.copytree(f, final_dir+f, ignore=ignore_patterns('*.pyc', '*.git'))
30.014706
73
0.622244
[ "MIT" ]
carolinscholl/SORN
utils/backup.py
2,041
Python
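A minimal usage sketch for backup_pickle, assuming only the attributes the function actually touches; the experiment and stats objects here are stand-ins built with SimpleNamespace:

    from types import SimpleNamespace

    experiment = SimpleNamespace(init_params={'N': 200},
                                 results_dir='music_run',
                                 files_tosave=['params', 'stats'])
    stats = SimpleNamespace(raster=[])
    backup_pickle(experiment, stats)  # writes backup/music_run_<n>/{init_params.p, stats.p}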
from pprint import pprint

from devices import network_devices
from napalm import get_network_driver


def open_napalm_connection(device):
    """Function to open a NAPALM connection and return the connection object."""
    # Copy the dictionary to ensure the original object is not modified
    device = device.copy()
    # Pop "platform" as this is an invalid kwarg to NAPALM
    platform = device.pop('platform')
    driver = get_network_driver(platform)
    conn = driver(**device)
    conn.open()
    return conn


def main():
    connections = []
    for device in network_devices:
        conn = open_napalm_connection(device)
        connections.append(conn)

    print("\n\n")
    print("Print facts for all devices in connections list")
    print("-" * 20)
    for conn in connections:
        print()
        print("-" * 6)
        print(conn)
        pprint("{} facts:".format(conn.platform))
        pprint(conn.get_facts())
        print("-" * 6)
        # Close the NAPALM connection
        conn.close()
    print("\n\n")


if __name__ == "__main__":
    main()
25.878049
72
0.64279
[ "Apache-2.0" ]
austind/pyplus-ons
day3/linting/exercise1.py
1,061
Python
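A hedged sketch of what one entry of network_devices presumably looks like: 'platform' selects the NAPALM driver and is popped before the remaining keys are passed to the driver as keyword arguments (all field values are placeholders):

    network_devices = [
        {
            "platform": "ios",        # consumed by get_network_driver()
            "hostname": "192.0.2.10", # the rest become driver kwargs
            "username": "admin",
            "password": "secret",
        },
    ]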
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 12 11:56:36 2017

Problem Set 1 - Problem 1
Note: 's' is given by the system, e.g. s = 'azcbobobegghakl'

@author: coskun
"""
s = 'azcbobobegghakl'

# Paste your code into this box
nvl = 0
for c in s:
    if c == 'a' or c == 'e' or c == 'i' or c == 'o' or c == 'u':
        nvl += 1
print("Number of vowels: " + str(nvl))
22
54
0.590909
[ "MIT" ]
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
anaconda/6.00.1x.PSet1.P1.py
352
Python
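For comparison, the same count as a one-line generator expression over a membership test:

    nvl = sum(c in 'aeiou' for c in s)  # True counts as 1 when summed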
import psycopg2


# Returns a connection to the DB.
# NOTE: the credentials are hard-coded here; in a real deployment they should be
# read from the environment or a config file rather than committed to the repo.
def get_sql_connection():
    conn = psycopg2.connect(user="cqwhbabxmaxxqd",
                            password="a3063dc5aeec69b41564cd0f1e3c698e0ff9653385f3b87c0f113b70951eb5b3",
                            host="ec2-54-235-92-244.compute-1.amazonaws.com",
                            port="5432",
                            database="d8d34m4nml4iij")
    return conn
35.083333
104
0.581948
[ "MIT" ]
MishaVernik/StudentAttendance
Attendance/context/sql_connection.py
421
Python
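A minimal usage sketch: open the connection, run a query, and clean up.

    conn = get_sql_connection()
    cur = conn.cursor()
    cur.execute("SELECT version();")
    print(cur.fetchone())
    cur.close()
    conn.close()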
#################################################################################### # Jiten Dhandha, 2020 # # CFit is a curve fitting tool in python, based on the method of least squares. # # It comes equipped with some standard functions and a graphical user interface. # # # # Inspired by: LSFR.py, Abie Marshall, The University of Manchester, 2016 # #################################################################################### #################################################################################### # LIBRARIES # #################################################################################### import numpy as np import matplotlib matplotlib.use('Qt5Agg') #This requires PyQt5 to be installed. import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import scipy.optimize as opt import scipy.special as sp import scipy.stats as stats import scipy.linalg as linalg import warnings #################################################################################### # LIST OF FUNCTIONS # #################################################################################### #Class to hold all relevant function information class Function(): def __init__(self,name,func,numberOfParameters,rawFuncStr,unicodeFuncStr,rawParametersStr,unicodeParametersStr): self.name = name self.func = func self.numberOfParameters = numberOfParameters self.rawFuncStr = rawFuncStr self.unicodeFuncStr = unicodeFuncStr self.rawParametersStr = rawParametersStr self.unicodeParametersStr = unicodeParametersStr ''' Current supported functions are as follows: Polynomial: constant, linear, quadratic, cubic, quartic, quintic Periodic functions: sine wave, square wave Peak shape functions: gaussian, poisson, laplace, lorentz Polynomial-based functions: power law Exponentials and logarithms: exponential, logarithm ''' #Dictonary to hold the functions functions = { 'Constant': Function(name='Constant', func=lambda x,a: np.polyval([a],x), numberOfParameters=1, rawFuncStr=r"$y = a$", unicodeFuncStr="y = a", rawParametersStr=[r'$a$'], unicodeParametersStr=['a']), 'Linear': Function(name='Linear', func=lambda x,a,b: np.polyval([a,b],x), numberOfParameters=2, rawFuncStr=r"$y = ax+b$", unicodeFuncStr="y = ax+b", rawParametersStr=[r'$a$',r'$b$'], unicodeParametersStr=['a','b']), 'Quadratic': Function(name='Quadratic', func=lambda x,a,b,c: np.polyval([a,b,c],x), numberOfParameters=3, rawFuncStr=r"$y = ax^2+bx+c$", unicodeFuncStr="y = ax\u00B2+bx+c", rawParametersStr=[r'$a$',r'$b$',r'$c$'], unicodeParametersStr=['a','b','c']), 'Cubic': Function(name='Cubic', func=lambda x,a,b,c,d: np.polyval([a,b,c,d],x), numberOfParameters=4, rawFuncStr=r"$y = ax^3+bx^2+cx+d$", unicodeFuncStr="y = ax\u00B3+bx\u00B2+cx+d", rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$'], unicodeParametersStr=['a','b','c','d']), 'Quartic': Function(name='Quadratic', func=lambda x,a,b,c,d,e: np.polyval([a,b,c,d,e],x), numberOfParameters=5, rawFuncStr=r"$y = ax^4+bx^3+cx^2+dx+e$", unicodeFuncStr="y = ax\u2074+bx\u00B3+cx\u00B2+dx+e", rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$',r'$e$'], unicodeParametersStr=['a','b','c','d','e']), 'Quintic': Function(name='Quintic', func=lambda x,a,b,c,d,e,f: np.polyval([a,b,c,d,e,f],x), numberOfParameters=6, rawFuncStr=r"$y = ax^5+bx^4+cx^3+dx^2+ex+f$", unicodeFuncStr="y = ax\u2075+bx\u2074+cx\u00B3+dx\u00B2+ex+f", rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$',r'$e$',r'$f$'], unicodeParametersStr=['a','b','c','d','e','f']), 'Sine wave': Function(name='Sine wave', func=lambda x,y0,A,omg,phi: y0 + 
A*np.sin(omg*x+phi), numberOfParameters=4, rawFuncStr=r"$y = y_0 + A[\sin(\omega x+\phi)]$", unicodeFuncStr="y = y\u2080 + A sin(\u03C9x+\u03D5)", rawParametersStr=[r'$y_0$',r'$A$',r'$\omega$',r'$\phi$'], unicodeParametersStr=['y\u2080','A','\u03C9','\u03D5']), 'Square wave': Function(name='Square wave', func=lambda x,y0,A,omg,phi: y0 + A*np.sign(np.sin(omg*x+phi)), numberOfParameters=4, rawFuncStr=r"$y = y_0 + A\/signum[\sin(\omega x+\phi)]$", unicodeFuncStr="y = y\u2080 + A signum[sin(\u03C9x+\u03D5)]", rawParametersStr=[r'$y_0$',r'$A$',r'$\omega$',r'$\phi$'], unicodeParametersStr=['y\u2080','A','\u03C9','\u03D5']), 'Gaussian': Function(name='Gaussian', func=lambda x,y0,A,mu,sig: y0 + (A/(sig*np.sqrt(2*np.pi)))*np.exp((-1/2)*((x-mu)/sig)**2), numberOfParameters=4, rawFuncStr=r"$y = y_0 + \frac{A}{\sigma \sqrt{2\pi}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}$", unicodeFuncStr="y = y\u2080 + A/[\u03C3 \u221A(2\u03C0)] \u00D7 e^[-(x-\u03BC)\u00B2/(2\u03C3\u00B2)]", rawParametersStr=[r'$y_0$',r'$A$',r'$\mu$',r'$\sigma$'], unicodeParametersStr=['y\u2080','A','\u03BC','\u03C3']), 'Poisson': Function(name='Poisson', func=lambda x,y0,A,lmd: y0 + A*(np.exp(-lmd))*(lmd**x)/sp.gamma(x), numberOfParameters=3, rawFuncStr=r"$y = y_0 + A\/\frac{e^{-\lambda}\lambda^x}{x!}$", unicodeFuncStr="y = y\u2080 + A [(e^\u03BB)(\u03BB^x)]/x!", rawParametersStr=[r'$y_0$',r'$A$',r'$\lambda$'], unicodeParametersStr=['y\u2080','A','\u03BB']), 'Laplacian': Function(name='Laplacian', func=lambda x,y0,A,mu,b: y0 + (A/(2*b))*np.exp(-np.abs(x-mu)/b), numberOfParameters=4, rawFuncStr=r"$y = y_0 + \frac{A}{2b}e^{-\frac{|x-\mu|}{b}}$", unicodeFuncStr="y = y\u2080 + A/(2b) \u00D7 e^(-|(x-\u03BC)|/b)", rawParametersStr=[r'$y_0$',r'$A$',r'$\mu$',r'$b$'], unicodeParametersStr=['y\u2080','A','\u03BC','b']), 'Lorentzian': Function(name='Lorentzian', func=lambda x,y0,A,x0,omg: y0 + (2*A/np.pi)*(omg/(4*(x-x0)**2+omg**2)), numberOfParameters=4, rawFuncStr=r"$y = y_0 + \frac{2A}{\pi}\frac{\omega}{4(x-x_0)^2+\omega^2}$", unicodeFuncStr="y = y\u2080 + (2A/\u03C0) \u00D7 (\u03C9/[4(x-x\u2080)\u00B2+\u03C9\u00B2])", rawParametersStr=[r'$y_0$',r'$A$',r'$x_0$',r'$\omega$'], unicodeParametersStr=['y\u2080','A','x\u2080','\u03C9']), 'Power': Function(name='Power', func=lambda x,A,b: A*(x)**b, numberOfParameters=2, rawFuncStr=r"$y = Ax^b$", unicodeFuncStr="y = A x\u1D47", rawParametersStr=[r'$A$',r'$b$'], unicodeParametersStr=['A','b']), 'Exponential': Function(name='Exponential', func=lambda x,y0,A,b: y0 + A*np.exp(b*x), numberOfParameters=3, rawFuncStr=r"$y = y_0 + A\/e^{bx}$", unicodeFuncStr="y = y\u2080 + A e^(bx)", rawParametersStr=[r'$y_0$',r'$A$',r'$b$'], unicodeParametersStr=['y\u2080','A','b']), 'Logarithm': Function(name='Logarithm', func=lambda x,y0,A,x0: y0 + A*np.log(x-x0), numberOfParameters=3, rawFuncStr=r"$y = y_0 + A\/log(x-x_0)$", unicodeFuncStr="y = y\u2080 + A log(x-x\u2080)", rawParametersStr=[r'$y_0$',r'$A$',r'$x_0$'], unicodeParametersStr=['y\u2080','A','x\u2080']) } #################################################################################### # GLOBAL VARIABLES # #################################################################################### #DATA RELATED VARIABLES data = [] #holds the data from data file x = [] #holds the x values from the data file y = [] #holds the y values from the data file y_err = [] #holds the y errors, either from user file or generated ERR = bool #boolean to check if data file contains errors numberOfDataPoints = int #holds the number of points in the data file #FIT FUNCTION RELATED VARIABLES 
function = '' #string holding the function to fit to numberOfParameters = int #holds the number of parameters of the fitting function #FITTING VARIABLES fitStructure = [] #holds the fitting information from curve_fit/polyfit fitParameters = [] #holds the fitting parameters fitErrors = [] #holds the errors on the fitting parameters chiSquared = float #holds the final chi-squared value of the fit redChiSquared = float #holds the final reduced chi-squared value of the fit redChiSquaredLimits = [] #holds the "acceptable range" of reduced chi-squared #################################################################################### # READING USER FILE # #################################################################################### ''' This function tries to read the file held at fileLocation and sets the global variables that hold all the information about the data set. @Arguments: fileLocation - string containing the location of the file chosen by user. @Return value: Returns an integer that specifies success (0) or failure (non 0) of the function. ''' def readFile(fileLocation): #Access to global variables global data global x global y global y_err global ERR global numberOfDataPoints #Checking if the file string is empty if(fileLocation == ''): return 1 #Checking if the file is a .txt or .csv file if(not fileLocation.endswith('.txt') and not fileLocation.endswith('.csv') ): return 2 #Trying to populate the data array from the file (allows both spaces and commas) try: with open(fileLocation, 'r') as file: clean_lines = [' '.join(line.split()) for line in file] for delims in [(' ,',','),(', ',','),(' ',',')]: clean_lines = [line.replace(*delims) for line in clean_lines] data = np.genfromtxt(clean_lines, delimiter=',',dtype='float_') except (TypeError, ValueError, AttributeError): return 3 #Checking if the data array has 2 or 3 columns try: if(not len(data[0])==2 and not len(data[0])==3): return 4 except TypeError: return 4 #Checking if there are any NaN's or Inf's in the data if(np.any(np.isnan(data)) or np.any(np.isinf(data))): return 5 #Checking if the errors are all positive number if(len(data[0])==3 and np.any(data[:,2]<=0)): return 6 #Setting global variables numberOfDataPoints = len(data) data = data[data[:,0].argsort()] #Sorting the array in ascending order along x column x = data[:,0] y = data[:,1] #Checking if error along y axis has been provided if(len(data[0])==2): y_err = np.array([1 for i in data]) #Constant error to aid in best chi-squared estimate ERR = False elif(len(data[0])==3): y_err = data[:,2] ERR = True #All ran correctly! return 0 #################################################################################### # FIT - RELATED FUNCTIONS # #################################################################################### ''' This function calculates the chi-squared against the data set given specific values of fitting function parameters. @Arguments: params - array containing parameters of the fitting function to calculate chi-squared against @Return value: Returns chi-squared as a float. ''' def calcChiSquared(params): #Access to global variables global function global x global y global y_err #Returning chi-squared value for the given fitting function parameters return np.sum( ((y-functions[function].func(x,*params))/y_err)**2 ) ''' This function calculates the final chi-squared as well as reduced chi-squared of the fit. It also calculates the acceptable range of reduced chi-squared based on the chi-squared statistic. 
@Arguments: -- @Return value: -- ''' def calcGoodnessOfFit(): #Access to global variables global numberOfDataPoints global numberOfParameters global fitParameters global chiSquared global redChiSquared global redChiSquaredLimits #Calculating degrees of freedom degreesOfFreedom = numberOfDataPoints - numberOfParameters #Calculating chi-squared and reduced chi-squared chiSquared = calcChiSquared(fitParameters) redChiSquared = chiSquared/degreesOfFreedom #Calculating the "acceptable" range of reduced chi-squared pValues = [0.95,0.05] redChiSquaredLimits = stats.chi2.isf(pValues,degreesOfFreedom)/degreesOfFreedom ''' This function provides an initial guess for the final fitting to take place in fitFunction(). It comes into play when the user wants to fit the data automatically. The initial guess is based on a two step procedure. It involves looking at the data: 1) and figuring out a single-valued "guess" 2) or figuring out bounds on the parameters and obtaining a guess from that by global minimization of chi-squared using the scipy differential evolution algorithm. @Arguments: -- @Return value: Returns an integer denoting success (0) or failure (non 0) of the function. ''' def guessParameters(): #Access to global variables global function global x global y global numberOfDataPoints #Useful quantities for parameter estimation xmin = min(x) xmax = max(x) ymin = min(y) ymax = max(y) #Empty array to store "initial guess" iniParameters = [] #All the parameter estimation happens here if(function in ['Constant','Linear','Quadratic','Cubic','Quartic','Quintic']): order = numberOfParameters - 1 iniParameters = np.polyfit(x,y,deg=order,w=1/y_err) elif(function=='Sine wave'): x_range = xmax - xmin y_range = ymax - ymin y0_bound = (ymin+2/5*abs(y_range),ymax-2/5*abs(y_range)) A_bound = (abs(y_range)/3,2*abs(y_range)/3) phi_bound = (0,2*np.pi) y_avg = np.average(y) y_std = np.std(y) yscaled = [] for i in y: if(i>y_avg+y_std): yscaled.append(1) elif(i<y_avg-y_std): yscaled.append(-1) else: yscaled.append(0) flag = yscaled[0] crossings = 0 for i in yscaled: if(i==0): continue if(flag==0): flag=i elif(i==-flag): flag = -flag crossings+=1 crossings = crossings/2 guess_f = crossings/x_range omg_bound = (0.5*(2*np.pi)*guess_f,2*(2*np.pi)*guess_f) BOUNDS = [y0_bound, A_bound, omg_bound, phi_bound] BOUNDS = [np.sort(bound) for bound in BOUNDS] with warnings.catch_warnings(): warnings.filterwarnings('ignore') iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x elif(function=='Square wave'): x_range = xmax - xmin y_range = ymax - ymin y0_bound = (ymin+2/5*abs(y_range),ymax-2/5*abs(y_range)) A_bound = (abs(y_range)/3,2*abs(y_range)/3) phi_bound = (0,2*np.pi) y_avg = np.average(y) y_std = np.std(y) yscaled = [] for i in y: if(i>y_avg+y_std): yscaled.append(1) elif(i<y_avg-y_std): yscaled.append(-1) else: yscaled.append(0) flag = yscaled[0] crossings = 0 for i in yscaled: if(i==0): continue if(flag==0): flag=i elif(i==-flag): flag = -flag crossings+=1 crossings = crossings/2 guess_f = crossings/x_range omg_bound = (0.5*(2*np.pi)*guess_f,2*(2*np.pi)*guess_f) BOUNDS = [y0_bound, A_bound, omg_bound, phi_bound] BOUNDS = [np.sort(bound) for bound in BOUNDS] with warnings.catch_warnings(): warnings.filterwarnings('ignore') iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x elif(function=='Gaussian'): x_range = xmax - xmin y_range = ymax - ymin mu_bound = (xmin-x_range,xmax+x_range) omg_bound = (0,x_range) A_bound1 = (abs(y_range)/3,2*abs(y_range)*2.5*x_range) 
y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, mu_bound, omg_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*2.5*x_range) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound2, A_bound2, mu_bound, omg_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Poisson'): x_range = xmax - xmin y_range = ymax - ymin lmd_bound = (max(0,xmin-x_range),xmax+x_range) A_bound1 = (0,2*abs(y_range)) y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, lmd_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (0,-2*abs(y_range)) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound1, A_bound1, lmd_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Laplacian'): x_range = xmax - xmin y_range = ymax - ymin mu_bound = (xmin-x_range,xmax+x_range) b_bound = (0,x_range) A_bound1 = (abs(y_range)/3,2*abs(y_range)*2*x_range) y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, mu_bound, b_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*2*x_range) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound2, A_bound2, mu_bound, b_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Lorentzian'): x_range = xmax - xmin y_range = ymax - ymin x0_bound = (xmin-x_range,xmax+x_range) omg_bound = (0,x_range) A_bound1 = (abs(y_range)/3,2*abs(y_range)*np.pi/2*x_range) y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, x0_bound, omg_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*np.pi/2*x_range) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound2, A_bound2, x0_bound, omg_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Power'): lX = np.log(abs(x), where=x>0) lY = np.log(abs(y), where=x>0) with np.errstate(invalid='ignore'): b_est, logA_est = np.polyfit(lX,lY,w=np.exp(lX),deg=1) 
A_est = np.exp(logA_est) A_bound = (-A_est,A_est) b_bound = (b_est-0.5*abs(b_est),b_est+0.5*abs(b_est)) BOUNDS = [A_bound,b_bound] BOUNDS = [np.sort(bound) for bound in BOUNDS] with warnings.catch_warnings(): warnings.filterwarnings('ignore') iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x elif(function=='Exponential'): #Inspired by https://github.com/scipy/scipy/pull/9158 s = np.empty_like(y) s[0] = 0 s[1:] = np.cumsum(0.5 * (y[1:] + y[:-1]) * np.diff(x)) xn = np.array(x - x[0]) yn = np.array(y - y[0]) sx2 = np.sum(xn**2) sxs = np.sum(xn*s) sys = np.sum(yn*s) ss2 = np.sum(s**2) sxy = np.sum(xn*yn) _, [b] = linalg.inv([[sx2, sxs], [sxs, ss2]]).dot([[sxy], [sys]]) ex = np.exp(b * x) se1 = np.sum(ex) se2 = np.sum(ex**2) sy0 = np.sum(y) sye = np.sum((y * ex)) [y0], [A] = linalg.inv([[x.size, se1], [se1, se2]]).dot([[sy0], [sye]]) iniParameters = [y0,A,b] elif(function=='Logarithm'): #Inspired by https://github.com/scipy/scipy/pull/9158 s = np.empty_like(x) s[0] = 0 s[1:] = np.cumsum(0.5 * (x[1:] + x[:-1]) * np.diff(y)) xn = np.array(x - x[0]) yn = np.array(y - y[0]) sy2 = np.sum(yn**2) sys = np.sum(yn*s) sxs = np.sum(xn*s) ss2 = np.sum(s**2) syx = np.sum(xn*yn) _, [t1] = linalg.inv([[sy2, sys], [sys, ss2]]).dot([[syx], [sxs]]) A = 1/t1 ey = np.exp(t1 * y) se1 = np.sum(ey) se2 = np.sum(ey**2) sx0 = np.sum(x) sxe = np.sum((x * ey)) [x0], [t2] = linalg.inv([[x.size, se1], [se1, se2]]).dot([[sx0], [sxe]]) with warnings.catch_warnings(): warnings.filterwarnings('ignore') y0 = -A*np.log(t2) iniParameters = [y0,A,x0] #If there is no initial guess if(iniParameters==[]): return 1 #Sending the "best guess" parameters to the final fitting algorithm return fitFunction(iniParameters) ''' This function converts a string containing the guess parameters for fitting provided by the user into an array of floats for the fitFunction() to use. It comes into play when the user wants to fit the data manually. @Arguments: iniParametersString - string containing initial parameters @Return value: Returns an integer denoting success (0) or failure (non 0) of the function. ''' def manualParameters(iniParametersString): #Access to global variables global numberOfParameters #Splitting string delimited by commas splitString = iniParametersString.split(',') #Trying to populate the iniParameters array iniParameters = [] try: for i in splitString: #Further splitting each sub-string into "right and left" of the "=" sign temp1 = i.split('=') #Taking the value on the right side and converting to float temp2 = float(temp1[1]) #Adding it to the iniParameters array iniParameters.append(temp2) except (ValueError,IndexError): return 3 #Checking if the number of parameters expected and received match if(len(iniParameters)!=numberOfParameters): return 3 #Sending the "manually input" parameters to the final fitting algorithm return fitFunction(iniParameters) ''' This function does the final fitting of the data. It takes an inital guess on the parameters and optimizes from there. @Arguments: iniParameters - array containing initial guess @Return value: Returns an integer denoting success (0) or failure (non 0) of the function. 
''' def fitFunction(iniParameters): #Access to global variables global function global x global y global y_err global numberOfDataPoints global numberOfParameters global fitStructure global fitParameters global fitErrors if(numberOfDataPoints<numberOfParameters): return 2 #Doing the final fitting of the data try: #Ignoring runtime warnings (in case the optimization passes through invalid values) with warnings.catch_warnings(): warnings.filterwarnings('ignore') #Main optimization happens here #Note: curve_fit populates sigma with 1's as a default. #absolute_sigma = True is the flag that forces errors to not be used in a relative manner fitStructure = opt.curve_fit(functions[function].func,x,y,absolute_sigma=True,p0=iniParameters,sigma=y_err) #Catching errors except RuntimeError as e: #Optimization failed if (str(e).startswith('Optimal parameters not found: Number of calls to function has reached maxfev')): return 1 #Something else went wrong else: raise #Filling in the fit parameters and errors on them (from the covariance matrix) fitParameters = fitStructure[0] fitErrors = np.sqrt(np.diag(fitStructure[1])) #Quantizing the goodness of fit calcGoodnessOfFit() #All ran correctly! return 0 #################################################################################### # PLOTTING FUNCTIONS # #################################################################################### ''' This function plots the raw data (without the fit). @Arguments: plotTitle - string holding the title of the plot xTitle - string holding the label for the x axis yTitle - string holding the label for the y axis viewGrid - boolean denoting whether the user wants the plot to have gridlines @Return value: -- ''' def plotRawData(plotTitle,xTitle,yTitle,viewGrid): #Access to global variables global x global y global y_err global ERR #Creating figure and adding subplot figure1 = plt.figure() axes1 = figure1.add_subplot(111) #Setting x and y axis labels axes1.set_title(plotTitle, fontsize='x-large') axes1.set_xlabel(xTitle, fontsize='large') axes1.set_ylabel(yTitle, fontsize='large') #Checking if user wants to add grid to plot and adding them if(viewGrid): axes1.minorticks_on() axes1.set_axisbelow(True) axes1.grid(b=True, which='major', alpha=0.5) axes1.grid(b=True, which='minor', alpha=0.2) #Plotting the raw data if(ERR): axes1.errorbar(x,y,y_err,fmt='.',color='midnightblue',ecolor='royalblue',capsize=2) else: axes1.scatter(x,y,color='midnightblue', label='Data') #Displaying the beauty figure1.show() ''' This function plots the raw data along with the fitting function and shows the fitting parameters if the user wants to see it. 
@Arguments: plotTitle - string holding the title of the plot xTitle - string holding the label for the x axis yTitle - string holding the label for the y axis viewGrid - boolean holding whether the user wants the plot to have gridlines viewParameters - boolean holding whether the user wants to see the fitting parameters viewResiduals - boolean holding whether the user wants to see the residuals plot @Return value: -- ''' def plotFitData(plotTitle,xTitle,yTitle,viewGrid,viewParameters,viewResiduals): #Access to global variables global x global y global y_err global ERR global numberOfDataPoints global function global numberOfParameters global fitParameters global fitErrors global chiSquared global redChiSquared global redChiSquaredLimits #Creating figure and adding subplots figure2 = plt.figure() if(viewResiduals and viewParameters): gs = gridspec.GridSpec(2, 2, height_ratios=[3, 1], width_ratios=[4,1]) axes2 = figure2.add_subplot(gs[0,0]) axes3 = figure2.add_subplot(gs[1,0]) axes4 = figure2.add_subplot(gs[0,1]) elif(viewResiduals and not viewParameters): gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) axes2 = figure2.add_subplot(gs[0]) axes3 = figure2.add_subplot(gs[1]) elif(not viewResiduals and viewParameters): gs = gridspec.GridSpec(1, 2, width_ratios=[4, 1]) axes2 = figure2.add_subplot(gs[0]) axes4 = figure2.add_subplot(gs[1]) else: axes2 = figure2.add_subplot(111) #Setting axes titles axes2.set_title(plotTitle, fontsize='x-large') axes2.set_xlabel(xTitle, fontsize='large') axes2.set_ylabel(yTitle, fontsize='large') #Checking if user wants to add grid to plot and adding them if(viewGrid): axes2.minorticks_on() axes2.set_axisbelow(True) axes2.grid(b=True, which='major', alpha=0.5) axes2.grid(b=True, which='minor', alpha=0.2) if(viewResiduals): axes3.minorticks_on() axes3.set_axisbelow(True) axes3.grid(b=True, which='major', alpha=0.5) axes3.grid(b=True, which='minor', alpha=0.2) #Plotting the raw data if(ERR): axes2.errorbar(x,y,y_err, fmt='.', color='midnightblue', ecolor='royalblue', capsize=2, zorder=1, label='Data') else: axes2.scatter(x,y,color='midnightblue', label='Data') #Plotting the best fit xx = np.linspace(min(x),max(x),1000) yy = functions[function].func(xx,*fitParameters) axes2.plot(xx,yy,color='darkorange', zorder=2, label='Fit function') #Plotting the residuals if(viewResiduals): residuals = functions[function].func(x,*fitParameters) - y axes3.axhline(0,color='darkorange', zorder=2) if(ERR==True): axes3.errorbar(x,residuals,y_err,fmt='.', color='midnightblue', ecolor='royalblue', capsize=2, zorder=1) else: axes3.scatter(x,residuals,color='midnightblue') #Adding legend to the plot axes2.legend(markerscale=2, fontsize='large') #Displaying fit parameters if the user wants if(viewParameters): #Removing x and y axis axes4.set_axis_off() #Declaring the string array that holds everything displayed in the parameters box parametersStr = [] #Adding function type to parameters box parametersStr.append(r"$\bf{Function:}$") parametersStr.append(functions[function].name) parametersStr.append(functions[function].rawFuncStr) #Adding fit parameters to the parameters box parametersStr.append("") parametersStr.append(r"$\bf{Fitting\/parameters:}$") for i in range(numberOfParameters): parametersStr.append(functions[function].rawParametersStr[i]+r' = {0:.5e} $\pm$ {1:.5e}'.format(fitParameters[i],fitErrors[i])) #Adding some additional fitting details to the parameters box parametersStr.append("") parametersStr.append(r"$\bf{Other\/fitting\/data:}$") parametersStr.append(r'Number of data 
points = {0}'.format(numberOfDataPoints)) parametersStr.append(r'Number of parameters = {0}'.format(numberOfParameters)) parametersStr.append(r'$\chi^2$ = {0:.5e}'.format(chiSquared)) parametersStr.append(r'$\chi_r^2$ = {0:.5e}'.format(redChiSquared)) parametersStr.append(r'Acceptable range of $\chi_r^2$ = ({0:.2f},{1:.2f})'.format(redChiSquaredLimits[0],redChiSquaredLimits[1])) #Adding an important note if(not ERR): parametersStr.append("") parametersStr.append(r'$\bf{Note}$: Errors and chi-squared estimates') parametersStr.append(r'here dont mean much since no errors') parametersStr.append(r'along y-axis are present!') #Joining all elements of the string array into a single string separated by \n's parametersStr = '\n'.join(parametersStr) #Placing the parameters box in the plot axes4.text(-0.35,1.0,parametersStr, bbox=dict(boxstyle="square", fc="lemonchiffon", ec="darkorange", pad=0.5), va='top', ha='left', fontsize='large', linespacing=1.3) #Displaying the beauty figure2.show() ''' APPENDIX: Check efficiency of differential evolution against other global minimization techniques: iniParameters = opt.brute(calcChiSquared,ranges=[],finish=opt.fmin) iniParameters = opt.basinhopping(calcChiSquared,x0=[]) '''
38.791267
140
0.550199
[ "CC0-1.0" ]
JitenDhandha/CFit
Fitting.py
36,425
Python
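A hedged driver sketch for the Fitting module above, which is normally driven by its GUI; the fit function is selected through module-level globals, so both the name and the parameter count are set before fitting (the data file name is a placeholder):

    import Fitting

    assert Fitting.readFile("data.csv") == 0  # 0 signals success
    Fitting.function = 'Linear'
    Fitting.numberOfParameters = Fitting.functions['Linear'].numberOfParameters
    assert Fitting.guessParameters() == 0     # auto-guess, then least-squares fit
    Fitting.plotFitData("Title", "x", "y",
                        viewGrid=True, viewParameters=True, viewResiduals=True)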
# -*- coding: utf-8 -*-
# @Author: jankincai
# @Date:   2021-01-26 23:18:43
# @Last Modified by:   jankincai
# @Last Modified time: 2021-01-26 23:27:01


class LibpcapError(Exception):
    """Exception raised for errors in the libpcap."""

    def __init__(self, message):
        """Store the error message."""
        self.message = message

    def __str__(self):
        """Return the error message."""
        return self.message
18.454545
50
0.573892
[ "BSD-3-Clause" ]
caizhengxin/python-libpcap
pylibpcap/exception.py
406
Python
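A trivial usage sketch; the exception simply carries its message:

    try:
        raise LibpcapError("pcap_open_live failed")
    except LibpcapError as err:
        print(err)  # -> pcap_open_live failed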
import os

import numpy as np
import matplotlib.pyplot as plt

# import sys, os
# sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
import process_data
import common


def plot_gt(Y_origin_data, pose_folder, preprocessed_folder, data_seqs, seq_sizes,
            dim="2d", save_graph=True, dataset="KITTI"):
    start_idx = 0
    end_idx = 0
    additional_row = np.array([0, 0, 0, 1], dtype=np.float64)
    for seq in data_seqs:
        end_idx += seq_sizes[seq]
        origin_poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4, 4), dtype=np.float64)
        for idx, row in enumerate(Y_origin_data[start_idx:end_idx]):
            new_pose = np.array(list(map(float, row.strip().split(" "))), dtype=np.float64)
            new_pose = np.concatenate((new_pose, additional_row))
            new_pose = new_pose.reshape(4, 4)
            origin_poses[idx] = new_pose
        fig = plt.figure(figsize=(10, 10))
        if dim == "2d":
            plt.scatter(origin_poses[:, 0, 3], origin_poses[:, 1, 3],
                        c=origin_poses[:, 2, 3], s=20, alpha=0.5)
        else:  # 3d
            ax = fig.add_subplot(111, projection='3d')
            ax.scatter(origin_poses[:, 0, 3], origin_poses[:, 1, 3], origin_poses[:, 2, 3],
                       c=origin_poses[:, 1, 3], s=20, alpha=0.5)
        if save_graph:
            graph_folder = os.path.join('result', dataset, 'graph')
            os.makedirs(graph_folder, exist_ok=True)
            plt.savefig(os.path.join(graph_folder, f"gt_{seq}_{dim}.png"))
        # plt.close(fig)
        start_idx += seq_sizes[seq]


def plot_results(Y_origin_data, Y_estimated_data, data_seqs, rnn_size, seq_sizes,
                 dim="2d", save_graph=True, dataset="KITTI"):
    start_idx = 0
    end_idx = 0
    additional_row = np.array([0, 0, 0, 1], dtype=np.float64)
    for i, seq in enumerate(data_seqs):
        end_idx += seq_sizes[seq]
        poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4, 4), dtype=np.float64)
        for idx in range(rnn_size):
            current_pose = np.array(list(map(float, Y_origin_data[start_idx + idx].strip().split(" "))), dtype=np.float64)
            current_pose = np.concatenate((current_pose, additional_row))
            current_pose = current_pose.reshape(4, 4)
            poses[idx] = current_pose
        for idx, relative_pose in enumerate(Y_estimated_data[start_idx - i * rnn_size:end_idx - (i + 1) * rnn_size]):
            rot_mat = common.euler_to_rot_mat(relative_pose[5], relative_pose[4], relative_pose[3])
            trans_mat = np.identity(4)
            trans_mat[:3, :3] = rot_mat
            trans_mat[0, 3] = relative_pose[0]
            trans_mat[1, 3] = relative_pose[1]
            trans_mat[2, 3] = relative_pose[2]
            current_pose = np.dot(current_pose, trans_mat)
            poses[idx + rnn_size] = current_pose
        fig = plt.figure(figsize=(10, 10))
        if dim == "2d":
            plt.scatter(poses[:, 0, 3], poses[:, 1, 3], c=poses[:, 2, 3], s=20, alpha=0.5)
        else:  # 3d
            ax = fig.add_subplot(111, projection='3d')
            ax.scatter(poses[:, 0, 3], poses[:, 1, 3], poses[:, 2, 3],
                       c=poses[:, 1, 3], s=20, alpha=0.5)
        if save_graph:
            graph_folder = os.path.join('result', dataset, 'graph')
            os.makedirs(graph_folder, exist_ok=True)
            plt.savefig(os.path.join(graph_folder, f"est_{seq}_{dim}.png"))
        # plt.close(fig)
        start_idx += seq_sizes[seq]
44.526316
134
0.618794
[ "MIT" ]
donghwijung/LoRCoN-LO
utils/plot.py
3,384
Python
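Both plotting helpers assume each row of Y_origin_data is a KITTI-style pose line: twelve floats forming a flattened 3x4 [R|t] matrix, padded to 4x4 exactly as in plot_gt. A minimal sketch of that parsing step:

    import numpy as np

    row = "1 0 0 0 0 1 0 0 0 0 1 0"  # identity rotation, zero translation
    pose = np.concatenate((
        np.array(list(map(float, row.strip().split(" "))), dtype=np.float64),
        np.array([0, 0, 0, 1], dtype=np.float64),
    )).reshape(4, 4)
    print(pose[:3, 3])  # the translation used for the scatter plots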
""" Module for handling the custom Lua commands for the bot """ import lupa import shlex import time from threading import Thread from .utils import human_readable_time, ArgumentParser from .http import Http, TupleData from .timer import Interval, Delayed from .chat import Chat class CommandPermissionError(BaseException): """ An exception that happens when a user tries to execute a custom command without the appropriate user level for it. """ pass class CommandCooldownError(BaseException): """ An exception that happens when a user tries to execute a custom command during it's cooldown period. """ pass class DataSource(object): """ A simple structure to allow Lua to store and read data from the database Call from Lua via the injected _G["datastore"] instance: .. code-block:: lua _G["datastore"].set("my-data", "my-value") _G["datastore"].get("my-data") If not working directly with the datasource implementation you should however use the datasource wrapper: .. code-block:: lua local ds = require('datasource') ds.set("my-data", "my-value") ds.get("my-data") """ def __init__(self, channel, bot, data=None): if not data: data = {} self.channel = channel self.bot = bot self.data = data def get(self, key): """ Get a single value from the database :param key: The name of the value :return: The stored value or "null" if not found """ if not key in self.data: # TODO: Check how to work around this silly shit return "null" return self.data[key] def set(self, key, value): """ Set a single value to the database :param key: The name of the value :param value: The value to be stored :return: None """ self.data[key] = value self.bot.update_global_value(self.channel, key, value) class CommandManager(object): """ Manager for custom commands """ # Template for creating new Lua functions func_template = u""" function __chat__{func_name}({args}) {func_body} end """ # Function template for doing Lua function calls # {{ and }} are escaped { and } for .format() call_template = u""" function(...) 
local chat = require("chat") local retval = __chat__{func_name}(unpack(table.pack(...))) if retval ~= nil then chat.message(retval) end return retval end """ def __init__(self, channel, bot, settings=None, data=None, logger=None, chat=None): self.channel = channel self.bot = bot if chat: self.chat = chat else: self.chat = Chat(self.bot, self.channel) self.settings = settings self.logger = logger self.commands = {} self.timers = [] self.datasource = DataSource(channel, bot, data) self.commands_last_executed = {} self.lua = lupa.LuaRuntime(unpack_returned_tuples=False) self._inject_globals() def stop_timers(self): """ Cancel all timers still running :return: """ for timer in self.timers: timer.cancel() def add_command(self, args): """ Handler for the "def" -commands in chat :param args: All the words after the "def" -command :return: The created command, if any, and the minimum required user level """ added, command, flags, user_level, code = self._parse_func(args) channel, command, flags, user_level, code = self.load_command( command, flags, user_level, code ) return added, channel, command, flags, user_level, code def add_simple_command(self, args): """ Handler for the "com" -commands in chat :param args: All the words after the "com" -command :return: A bunch of stuff """ added, command, flags, user_level, code = self._parse_simple_func(args) channel, command, flags, user_level, code = self.load_command( command, flags, user_level, code ) return added, channel, command, flags, user_level, code def is_valid_command(self, command): """ Check if the given command is registered :param command: The name of the command :return: True or False """ return command in self.commands def load_command(self, command, flags, user_level, code, set=True): """ Load a command in the runtime :param command: What is the command called :param flags: Command flags :param user_level: The minimum user level to run the command :param code: The Lua code for the custom command :param set: Should the command be set on the bot via set_command, set this to False when loading commands from e.g. 
the database :return: None """ if self.logger: self.logger.debug(u"Loading command {0} with user level " u"{1}".format( command, user_level )) self.commands[command] = { "flags": flags, "user_level": user_level, "code": code } self.load_lua(code) return self.channel, command, flags, user_level, code def run_command(self, nick, user_level, command, args=None, timestamp=None, threaded=True): """ Handles running of custom commands from chat :param nick: The calling user :param user_level: The calling user's level :param command: The command triggered :param args: The words on the line after the command :param timestamp: The unixtime for when the event happened :return: Any return value from the custom Lua command, to be sent back to the channel :raise CommandPermissionError: If user lacks permissions for command """ if not self._can_run_command(user_level, command): raise CommandPermissionError(u"User does not have permission to " u"run this command") if args is None: args = [] else: if "quoted" in self.commands[command]["flags"]: if self.commands[command]["flags"]["quoted"] == 1: text = " ".join(args) args = shlex.split(text) if timestamp is None: timestamp = time.time() if self._is_under_cooldown(command, timestamp): raise CommandCooldownError() self._set_last_executed_time(command, timestamp) def run(): code = self.call_template.format(func_name=command) lua_func = self.lua.eval(code) if "want_user" in self.commands[command]["flags"]: if self.commands[command]["flags"]["want_user"] == 1: args.insert(0, nick) return lua_func(*args) if threaded: lua_thread = Thread(target=run) lua_thread.daemon = True lua_thread.start() else: return run() def load_lua(self, code): """ Load Lua code in our runtime :param code: The Lua code :return: None """ self.lua.execute(code) def _parse_func(self, args): """ Process the given arguments into a function definition :param args: List of the words after the "def" command :return: Function name, if it wants the caller's user name, the required user level, and the function's Lua code :raise argparse.ArgumentError: There was something wrong with the args """ parser = ArgumentParser() parser.add_argument("-ul", "--user_level", default="mod") parser.add_argument("-c", "--cooldown", default=None) parser.add_argument("-a", "--args", default="") parser.add_argument("-w", "--want_user", action="store_true", default=False) parser.add_argument("-q", "--quoted", action="store_true", default=False) parser.add_argument("func_name") parser.add_argument("func_body", nargs='*') options = parser.parse_args(args) # Rebuild code if options.want_user: new_args = "user" if len(options.args) > 0: new_args += "," options.args = new_args + options.args code = self.func_template.format( func_name=options.func_name, args=options.args, func_body=" ".join(options.func_body) ) flags = { "want_user": int(options.want_user), "quoted": int(options.quoted), "cooldown": (int(options.cooldown) if options.cooldown else None) } added = bool(options.func_body) return added, options.func_name, flags, options.user_level, code def _parse_simple_func(self, args): """ Process the given arguments into a simple function definition :param args: List of the words after the "com" command :return: Function name, if it wants the caller's user name, the required user level, and the function's Lua code :raise argparse.ArgumentError: There was something wrong with the args """ parser = ArgumentParser() parser.add_argument("-ul", "--user_level", default="mod") parser.add_argument("-c", "--cooldown", default=None) 
parser.add_argument("func_name") parser.add_argument("response_text", nargs='*') options = parser.parse_args(args) # Rebuild response response_text = " ".join(options.response_text) response_text = response_text.replace("\\", "\\\\") response_text = response_text.replace('"', '\\"') func_body = u""" return SimpleCom("{response_text}", user, table.pack(...)) """.format(response_text=response_text) code = self.func_template.format( func_name=options.func_name, args="user,...", func_body=func_body ) flags = { "want_user": 1, "quoted": 0, "cooldown": (int(options.cooldown) if options.cooldown else None) } added = bool(options.response_text) return added, options.func_name, flags, options.user_level, code def _is_under_cooldown(self, command, timestamp): """ Check if this command's cooldown period is in effect :param command: Which command :param timestamp: What is the timestamp it was issued on :return: """ if command in self.commands_last_executed: if "cooldown" in self.commands[command]["flags"]: cooldown_period = self.commands[command]["flags"]["cooldown"] last_executed = self.commands_last_executed[command] if cooldown_period is not None: cooldown_expires = last_executed + cooldown_period if timestamp < cooldown_expires: return True return False def _set_last_executed_time(self, command, timestamp): """ Save the last execution time of a command :param command: Which command :param timestamp: What is the timestamp it was issued on :return: """ self.commands_last_executed[command] = timestamp def _level_name_to_number(self, name): """ Convert the given user level to a number :param name: Level name :return: A number between 0 and Infinity, higher number is higher user level :raise ValueError: In case of invalid user level """ levels = [ "user", "reg", "mod", "owner" ] if not name in levels: raise ValueError(u"{0} is not a valid user level".format(name)) return levels.index(name) def _can_run_command(self, user_level, command): """ Check if this command can be run with the given user level :param user_level: The calling user's level :param command: The command being called :return: True of False """ need_level = self._level_name_to_number( self.commands[command]["user_level"] ) got_level = self._level_name_to_number(user_level) return got_level >= need_level def _inject_globals(self): """ Inject some Python objects and functions into the Lua global scope _G :return: None """ injector = self.lua.eval(""" function (key, value) _G[key] = value end """) def log(message): """ Pass a message from Lua to the Python logger :param message: The message text :return: None """ self.logger.debug(u"Lua: " + str(message)) def interval(seconds, function): i = Interval(seconds, function, self.lua) self.timers.append(i) return i def delayed(seconds, function): i = Delayed(seconds, function, self.lua) self.timers.append(i) return i def simple_com(text, user, args): params = [] if args: for key in args: if key != "n": params.append(args[key]) try: response = text.format(*params, user=user) except IndexError: response = user + u", invalid number of arguments." return response injector("log", log) injector("datasource", self.datasource) injector("human_readable_time", human_readable_time) injector("settings", self.settings) injector("Chat", self.chat) injector("Http", Http()) injector("TupleData", TupleData) injector("Interval", interval) injector("Delayed", delayed) injector("SimpleCom", simple_com)
29.426263
79
0.57339
[ "MIT" ]
lietu/twitch-bot
bot/commandmanager.py
14,566
Python
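The cooldown bookkeeping above is compact enough to exercise on its own. Below is a minimal standalone sketch of the same check-then-record pattern; the CooldownTracker name and the 5-second period are illustrative, not part of the bot's API.

import time


class CooldownTracker:
    def __init__(self):
        self.last_executed = {}

    def is_under_cooldown(self, command, cooldown_period, timestamp=None):
        # Mirrors _is_under_cooldown: compare "now" against last run + period
        timestamp = timestamp if timestamp is not None else time.time()
        last = self.last_executed.get(command)
        if last is None or cooldown_period is None:
            return False
        return timestamp < last + cooldown_period

    def mark_executed(self, command, timestamp=None):
        # Mirrors _set_last_executed_time
        self.last_executed[command] = (
            timestamp if timestamp is not None else time.time())


tracker = CooldownTracker()
tracker.mark_executed("!hello")
assert tracker.is_under_cooldown("!hello", cooldown_period=5)
assert not tracker.is_under_cooldown("!hello", cooldown_period=5,
                                     timestamp=time.time() + 10)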
from typing import List, Optional, Union
from dataclasses import dataclass, field


## - - - - - - -
## userinfo.json
## - - - - - - -

@dataclass
class UserAddress:
    formatted: str = ""


@dataclass
class UserInfoResponse:
    group_ids: List[str] = field(default_factory=list)
    sub: str = ""
    given_name: str = ""
    name: str = ""
    email: str = ""
    phone_number: Optional[str] = None
    address: Optional[UserAddress] = None
    picture: Optional[str] = None


## - - - - - - -
## sprint.json
## - - - - - - -

@dataclass
class Sprint:
    id: str
    name: str
    start: str
    end: str


@dataclass
class ShortUserInfo:
    id: str
    name: str
    picture: str


@dataclass
class Issue:
    id: str
    summary: str
    description_short: str
    index: int
    status_id: str
    story_points: int
    assigned: Optional[ShortUserInfo]
    modified_at: str


@dataclass
class SprintResponse:
    sprint: Sprint
    issues: List[Issue]


## - - - - - - -
## create-task.json
## - - - - - - -

@dataclass
class CreateTaskRequestBody:
    author: str
    assigned: str
    summary: str
    project: str
    sprint: str
    labels: List[str]
    issue_type: str
    components: List[str]
    description: str
    priority: str


@dataclass
class CreateTaskPerson:
    id: str
    email: str
    name: str
    picture: str
    is_active: bool


@dataclass
class CreateTaskProject:
    id: str
    name: str


@dataclass
class CreateTaskStatus:
    id: str
    name: str


@dataclass
class CreateTaskActivity:
    user_id: str
    action: str
    created_at: str
    details: Optional[Union[CreateTaskPerson, CreateTaskStatus]] = None


@dataclass
class CreateTaskResponse:
    author: CreateTaskPerson
    assigned: CreateTaskPerson
    summary: str
    project: CreateTaskProject
    sprint: str
    labels: List[str]
    issue_type: str
    components: List[str]
    description: str
    priority: str
    status: CreateTaskStatus
    activity: List[CreateTaskActivity]
    created_at: str
    modified_at: str


## - - - - - - -
## update-task.json
## - - - - - - -

@dataclass
class UpdateTaskRequestBody(CreateTaskRequestBody):
    pass
16.253731
71
0.636823
[ "MIT" ]
mtag-dev/py-rest-stress-testing
frameworks/schema_dataclasses.py
2,178
Python
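A short sketch of how a sprint.json payload would be materialized with the dataclasses above; all field values here are hypothetical.

from dataclasses import asdict

sprint = Sprint(id="S-1", name="Sprint 1", start="2022-01-03", end="2022-01-14")
issue = Issue(
    id="T-42", summary="Fix login flow", description_short="Login fails on refresh",
    index=1, status_id="in-progress", story_points=3,
    assigned=ShortUserInfo(id="u1", name="Ada", picture=""),
    modified_at="2022-01-05T10:00:00Z",
)
resp = SprintResponse(sprint=sprint, issues=[issue])
# asdict recurses through nested dataclasses
assert asdict(resp)["sprint"]["name"] == "Sprint 1"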
# # Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending # import os import unittest from deephaven import kafka_consumer as ck from deephaven.stream.kafka.consumer import TableType, KeyValueSpec from tests.testbase import BaseTestCase from deephaven import dtypes class KafkaConsumerTestCase(BaseTestCase): def _assert_common_cols(self, cols): self.assertEqual("KafkaPartition", cols[0].name) self.assertEqual(dtypes.int32, cols[0].data_type) self.assertEqual("KafkaOffset", cols[1].name) self.assertEqual(dtypes.long, cols[1].data_type) self.assertEqual("KafkaTimestamp", cols[2].name) self.assertEqual(dtypes.DateTime, cols[2].data_type) def test_basic_constants(self): """ Check that the basic constants are imported and visible. """ self.assertIsNotNone(ck.SEEK_TO_BEGINNING) self.assertIsNotNone(ck.DONT_SEEK) self.assertIsNotNone(ck.SEEK_TO_END) self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_BEGINNING) self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_END) self.assertIsNotNone(ck.ALL_PARTITIONS_DONT_SEEK) def test_simple_spec(self): """ Check a simple Kafka subscription creates the right table. """ t = ck.consume( {'bootstrap.servers': 'redpanda:29092'}, 'orders', key_spec=KeyValueSpec.IGNORE, value_spec=ck.simple_spec('Price', dtypes.double)) cols = t.columns self.assertEqual(4, len(cols)) self._assert_common_cols(cols) self.assertEqual("Price", cols[3].name) self.assertEqual(dtypes.double, cols[3].data_type) def test_json_spec(self): """ Check a JSON Kafka subscription creates the right table. """ t = ck.consume( {'bootstrap.servers': 'redpanda:29092'}, 'orders', key_spec=KeyValueSpec.IGNORE, value_spec=ck.json_spec( [('Symbol', dtypes.string), ('Side', dtypes.string), ('Price', dtypes.double), ('Qty', dtypes.int_), ('Tstamp', dtypes.DateTime)], mapping={ 'jsymbol': 'Symbol', 'jside': 'Side', 'jprice': 'Price', 'jqty': 'Qty', 'jts': 'Tstamp' } ), table_type=TableType.append() ) cols = t.columns self.assertEqual(8, len(cols)) self._assert_common_cols(cols) self.assertEqual("Symbol", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Side", cols[4].name) self.assertEqual(dtypes.string, cols[4].data_type) self.assertEqual("Price", cols[5].name) self.assertEqual(dtypes.double, cols[5].data_type) self.assertEqual("Qty", cols[6].name) self.assertEqual(dtypes.int_, cols[6].data_type) self.assertEqual("Tstamp", cols[7].name) self.assertEqual(dtypes.DateTime, cols[7].data_type) def test_avro_spec(self): """ Check an Avro Kafka subscription creates the right table. 
""" schema = \ """ { "type" : "record", "namespace" : "io.deephaven.examples", "name" : "share_price", "fields" : [ { "name" : "Symbol", "type" : "string" }, { "name" : "Side", "type" : "string" }, { "name" : "Qty", "type" : "int" }, { "name" : "Price", "type" : "double" } ] } """ schema_str = '{ "schema" : "%s" }' % \ schema.replace('\n', ' ').replace('"', '\\"') sys_str = \ """ curl -X POST \ -H 'Content-type: application/vnd.schemaregistry.v1+json; artifactType=AVRO' \ --data-binary '%s' \ http://redpanda:8081/subjects/share_price_record/versions """ % schema_str r = os.system(sys_str) self.assertEqual(0, r) with self.subTest(msg='straight schema, no mapping'): t = ck.consume( { 'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081' }, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', schema_version='1'), table_type=TableType.append() ) cols = t.columns self.assertEqual(7, len(cols)) self._assert_common_cols(cols) self.assertEqual("Symbol", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Side", cols[4].name) self.assertEqual(dtypes.string, cols[4].data_type) self.assertEqual("Qty", cols[5].name) self.assertEqual(dtypes.int32, cols[5].data_type) self.assertEqual("Price", cols[6].name) self.assertEqual(dtypes.double, cols[6].data_type) with self.subTest(msg='mapping_only (filter out some schema fields)'): m = {'Symbol': 'Ticker', 'Price': 'Dollars'} t = ck.consume( { 'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081' }, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m, mapped_only=True), table_type=TableType.append() ) cols = t.columns self.assertEqual(5, len(cols)) self._assert_common_cols(cols) self.assertEqual("Ticker", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Dollars", cols[4].name) self.assertEqual(dtypes.double, cols[4].data_type) with self.subTest(msg='mapping (rename some fields)'): m = {'Symbol': 'Ticker', 'Qty': 'Quantity'} t = ck.consume( { 'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081' }, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m), table_type=TableType.append() ) cols = t.columns self.assertEqual(7, len(cols)) self._assert_common_cols(cols) self.assertEqual("Ticker", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Side", cols[4].name) self.assertEqual(dtypes.string, cols[4].data_type) self.assertEqual("Quantity", cols[5].name) self.assertEqual(dtypes.int32, cols[5].data_type) self.assertEqual("Price", cols[6].name) self.assertEqual(dtypes.double, cols[6].data_type) @unittest.skip("https://github.com/deephaven/deephaven-core/pull/2277") def test_deprecated_table_types(self): """ Tests to make sure deprecated TableTypes are equivalent """ self.assertEqual(TableType.append(), TableType.Append) self.assertEqual(TableType.stream(), TableType.Stream) def test_table_types(self): """ Tests TableType construction """ _ = TableType.append() _ = TableType.stream() _ = TableType.ring(4096) if __name__ == "__main__": unittest.main()
36.105991
94
0.5515
[ "MIT" ]
lbooker42/deephaven-core
py/server/tests/test_kafka_consumer.py
7,835
Python
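Outside the test harness, the same subscription reads as below; this is a minimal sketch reusing only the calls exercised in the tests, and 'redpanda:29092' is the test cluster address rather than a production broker.

from deephaven import kafka_consumer as ck
from deephaven.stream.kafka.consumer import TableType, KeyValueSpec
from deephaven import dtypes

orders = ck.consume(
    {'bootstrap.servers': 'redpanda:29092'},
    'orders',
    key_spec=KeyValueSpec.IGNORE,
    value_spec=ck.json_spec([('Symbol', dtypes.string),
                             ('Price', dtypes.double)]),
    table_type=TableType.append(),
)
# orders carries KafkaPartition/KafkaOffset/KafkaTimestamp plus the two JSON columns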
import re from base64 import b64decode from datetime import datetime from urllib.parse import urlparse import marshmallow as ma from app.objects.secondclass.c_link import Link from app.utility.base_object import BaseObject from app.utility.base_planning_svc import BasePlanningService class Agent(BaseObject): RESERVED = dict(server='#{server}', group='#{group}', agent_paw='#{paw}', location='#{location}', exe_name='#{exe_name}', payload=re.compile('#{payload:(.*?)}', flags=re.DOTALL)) class AgentSchema(ma.Schema): paw = ma.fields.String() group = ma.fields.String() architecture = ma.fields.String() platform = ma.fields.String() server = ma.fields.String() username = ma.fields.String() location = ma.fields.String() pid = ma.fields.Integer() ppid = ma.fields.Integer() trusted = ma.fields.Boolean() last_seen = ma.fields.DateTime(format='%Y-%m-%d %H:%M:%S') sleep_min = ma.fields.Integer() sleep_max = ma.fields.Integer() executors = ma.fields.List(ma.fields.String()) privilege = ma.fields.String() display_name = ma.fields.String() exe_name = ma.fields.String() host = ma.fields.String() watchdog = ma.fields.Integer() contact = ma.fields.String() links = ma.fields.List(ma.fields.String) @ma.pre_load def remove_nulls(self, in_data, **_): return {k: v for k, v in in_data.items() if v is not None} @property def unique(self): return self.hash(self.paw) @property def display(self): return dict(paw=self.paw, group=self.group, architecture=self.architecture, platform=self.platform, server=self.server, location=self.location, pid=self.pid, ppid=self.ppid, trusted=self.trusted, last_seen=self.last_seen.strftime('%Y-%m-%d %H:%M:%S'), sleep_min=self.sleep_min, sleep_max=self.sleep_max, executors=self.executors, privilege=self.privilege, display_name=self.display_name, exe_name=self.exe_name, host=self.host, watchdog=self.watchdog, contact=self.contact, links=[link.display for link in self.links]) @property def display_name(self): return '{}${}'.format(self.host, self.username) def __init__(self, sleep_min, sleep_max, watchdog, platform='unknown', server='unknown', host='unknown', username='unknown', architecture='unknown', group='red', location='unknown', pid=0, ppid=0, trusted=True, executors=(), privilege='User', exe_name='unknown', contact='unknown', paw=None): super().__init__() self.paw = paw if paw else self.generate_name(size=6) self.host = host self.username = username self.group = group self.architecture = architecture self.platform = platform url = urlparse(server) self.server = '%s://%s:%s' % (url.scheme, url.hostname, url.port) self.location = location self.pid = pid self.ppid = ppid self.trusted = trusted self.created = datetime.now() self.last_seen = self.created self.last_trusted_seen = self.created self.executors = executors self.privilege = privilege self.exe_name = exe_name self.sleep_min = int(sleep_min) self.sleep_max = int(sleep_max) self.watchdog = int(watchdog) self.contact = contact self.links = [] self.access = self.Access.BLUE if group == 'blue' else self.Access.RED @classmethod def from_dict(cls, dict_obj): """ Creates an Agent object from parameters stored in a dict. 
AgentSchema is used to validate inputs.""" return cls(**cls.AgentSchema().load(dict_obj, partial=['paw'])) def store(self, ram): existing = self.retrieve(ram['agents'], self.unique) if not existing: ram['agents'].append(self) return self.retrieve(ram['agents'], self.unique) return existing async def calculate_sleep(self): return self.jitter('%d/%d' % (self.sleep_min, self.sleep_max)) async def capabilities(self, ability_set): abilities = [] if self.executors: preferred = 'psh' if 'psh' in self.executors else self.executors[0] executors = self.executors for ai in set([pa.ability_id for pa in ability_set]): total_ability = [ab for ab in ability_set if (ab.ability_id == ai) and (ab.platform == self.platform) and (ab.executor in executors)] if len(total_ability) > 0: val = next((ta for ta in total_ability if ta.executor == preferred), total_ability[0]) if self.privileged_to_run(val): abilities.append(val) return abilities async def heartbeat_modification(self, **kwargs): now = datetime.now() self.last_seen = now if self.trusted: self.last_trusted_seen = now self.update('pid', kwargs.get('pid')) self.update('ppid', kwargs.get('ppid')) self.update('server', kwargs.get('server')) self.update('exe_name', kwargs.get('exe_name')) self.update('location', kwargs.get('location')) self.update('privilege', kwargs.get('privilege')) self.update('host', kwargs.get('host')) self.update('username', kwargs.get('username')) self.update('architecture', kwargs.get('architecture')) self.update('platform', kwargs.get('platform')) self.update('executors', kwargs.get('executors')) async def gui_modification(self, **kwargs): loaded = self.AgentSchema(only=('group', 'trusted', 'sleep_min', 'sleep_max', 'watchdog')).load(kwargs) for k, v in loaded.items(): self.update(k, v) async def kill(self): self.update('watchdog', 1) self.update('sleep_min', 60 * 2) self.update('sleep_max', 60 * 2) def replace(self, encoded_cmd, file_svc): decoded_cmd = b64decode(encoded_cmd).decode('utf-8', errors='ignore').replace('\n', '') decoded_cmd = decoded_cmd.replace(self.RESERVED['server'], self.server) decoded_cmd = decoded_cmd.replace(self.RESERVED['group'], self.group) decoded_cmd = decoded_cmd.replace(self.RESERVED['agent_paw'], self.paw) decoded_cmd = decoded_cmd.replace(self.RESERVED['location'], self.location) decoded_cmd = decoded_cmd.replace(self.RESERVED['exe_name'], self.exe_name) decoded_cmd = self._replace_payload_data(decoded_cmd, file_svc) return decoded_cmd def privileged_to_run(self, ability): if not ability.privilege or self.Privileges[self.privilege].value >= self.Privileges[ability.privilege].value: return True return False async def bootstrap(self, data_svc): abilities = [] for i in self.get_config(name='agents', prop='bootstrap_abilities'): for a in await data_svc.locate('abilities', match=dict(ability_id=i)): abilities.append(a) await self.task(abilities) async def task(self, abilities, facts=()): for i in await self.capabilities(abilities): self.links.append(Link(operation=None, command=i.test, paw=self.paw, ability=i)) return await BasePlanningService().add_test_variants(links=self.links, agent=self, facts=facts) """ PRIVATE """ def _replace_payload_data(self, decoded_cmd, file_svc): for uuid in re.findall(self.RESERVED['payload'], decoded_cmd): if self.is_uuid4(uuid): _, display_name = file_svc.get_payload_name_from_uuid(uuid) decoded_cmd = decoded_cmd.replace('#{payload:%s}' % uuid, display_name) return decoded_cmd
43.67033
118
0.626824
[ "Apache-2.0" ]
zaphodef/caldera
app/objects/c_agent.py
7,948
Python
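The placeholder substitution performed by Agent.replace is easy to see in isolation. A standalone sketch of the same idea, minus the base64 decoding and the payload lookup; the agent dict and the command string are hypothetical.

RESERVED = {'server': '#{server}', 'group': '#{group}', 'agent_paw': '#{paw}'}

def substitute(cmd, agent):
    # Each reserved token is replaced with the agent's live value
    cmd = cmd.replace(RESERVED['server'], agent['server'])
    cmd = cmd.replace(RESERVED['group'], agent['group'])
    cmd = cmd.replace(RESERVED['agent_paw'], agent['paw'])
    return cmd

agent = {'server': 'http://10.0.0.1:8888', 'group': 'red', 'paw': 'abc123'}
assert substitute('curl #{server}/file.txt -A #{paw}', agent) == \
    'curl http://10.0.0.1:8888/file.txt -A abc123'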
"""Satellite and reanalysis products"""

__author__ = 'roehrig'
16
36
0.734375
[ "MIT" ]
JRoehrig/pywarsa
warsa/precipitation/satellite/__init__.py
64
Python
#!/usr/bin/env python

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from codecs import open
from setuptools import setup, find_packages

try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")

VERSION = '0.1.0'

# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'License :: OSI Approved :: MIT License',
]

DEPENDENCIES = []

with open('README.md', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()

setup(
    name='connection-monitor-preview',
    version=VERSION,
    description='Microsoft Azure Command-Line Connection Monitor V2 Extension',
    author='Microsoft Corporation',
    author_email='[email protected]',
    url='https://github.com/Azure/azure-cli-extensions/tree/master/src/connection-monitor-preview',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    install_requires=DEPENDENCIES,
    package_data={'azext_connection_monitor_preview': ['azext_metadata.json']},
)
34.666667
99
0.635121
[ "MIT" ]
00Kai0/azure-cli-extensions
src/connection-monitor-preview/setup.py
1,976
Python
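Classifier lists like the one above are a classic site for the implicit string-concatenation pitfall: without a comma, adjacent string literals merge at compile time instead of raising an error, silently producing one bogus classifier. A minimal demonstration:

broken = [
    'Programming Language :: Python :: 3.7'
    'License :: OSI Approved :: MIT License',   # no comma above: one merged string
]
fixed = [
    'Programming Language :: Python :: 3.7',
    'License :: OSI Approved :: MIT License',
]
assert len(broken) == 1 and len(fixed) == 2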
# ------------------------------------------------------------------------------ # CodeHawk Binary Analyzer # Author: Henny Sipma # ------------------------------------------------------------------------------ # The MIT License (MIT) # # Copyright (c) 2016-2020 Kestrel Technology LLC # Copyright (c) 2020 Henny Sipma # Copyright (c) 2021-2022 Aarno Labs LLC # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ------------------------------------------------------------------------------ """Utilities to print and save graphviz dot files.""" import os import subprocess from typing import TYPE_CHECKING if TYPE_CHECKING: from chb.util.DotGraph import DotGraph def print_dot( path: str, filename: str, g: "DotGraph") -> str: if not os.path.isabs(filename): filename = os.path.join(path, filename) dotfilename = filename + ".dot" pdffilename = filename + ".pdf" # write graph to dot format with open(dotfilename, "w") as fp: fp.write(str(g)) # convert dot file to pdf cmd = ["dot", "-Tpdf", "-o", pdffilename, dotfilename] try: subprocess.call(cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("Error in processing dot file: " + dotfilename) print(e.output) print(e.args) exit(1) return pdffilename def save_dot(path: str, filename: str, g: "DotGraph") -> None: if not os.path.isabs(filename): filename = os.path.join(path, filename) dotfilename = filename + ".dot" with open(dotfilename, "w") as fp: fp.write(str(g)) def save_svg(path: str, filename: str, g: "DotGraph") -> None: if not os.path.isabs(filename): filename = os.path.join(path, filename) dotfilename = filename + ".dot" svgfilename = filename + ".svg" with open(dotfilename, "w") as fp: fp.write(str(g)) cmd = ["dot", "-Tsvg", "-o", svgfilename, dotfilename] try: subprocess.call(cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("Error in processing dot file: " + dotfilename) print(e.output) print(e.args) exit(1)
36.375
80
0.634177
[ "MIT" ]
kestreltechnology/CodeHawk-Binary
chb/util/dotutil.py
3,201
Python
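Typical driving code for the helpers above, assuming a chb.util.DotGraph instance g built elsewhere; the output directory and basename are illustrative.

from chb.util.dotutil import print_dot, save_dot, save_svg

pdffile = print_dot("/tmp/graphs", "cfg_main", g)  # writes cfg_main.dot, renders cfg_main.pdf
save_dot("/tmp/graphs", "cfg_main", g)             # dot file only, no graphviz invocation
save_svg("/tmp/graphs", "cfg_main", g)             # writes cfg_main.dot and renders cfg_main.svg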
# qubit number=3 # total number=9 import numpy as np from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ import networkx as nx from qiskit.visualization import plot_histogram from typing import * from pprint import pprint from math import log2 from collections import Counter from qiskit.test.mock import FakeVigo, FakeYorktown kernel = 'circuit/bernstein' def make_circuit(n:int) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") prog = QuantumCircuit(input_qubit) prog.h(input_qubit[0]) # number=1 prog.h(input_qubit[1]) # number=2 prog.h(input_qubit[2]) # number=3 prog.h(input_qubit[3]) # number=4 for edge in E: k = edge[0] l = edge[1] prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1]) prog.p(gamma, k) prog.p(gamma, l) prog.rx(2 * beta, range(len(V))) prog.swap(input_qubit[1],input_qubit[0]) # number=5 prog.swap(input_qubit[1],input_qubit[0]) # number=6 prog.y(input_qubit[3]) # number=7 prog.y(input_qubit[3]) # number=8 # circuit end return prog if __name__ == '__main__': n = 4 V = np.arange(0, n, 1) E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)] G = nx.Graph() G.add_nodes_from(V) G.add_weighted_edges_from(E) step_size = 0.1 a_gamma = np.arange(0, np.pi, step_size) a_beta = np.arange(0, np.pi, step_size) a_gamma, a_beta = np.meshgrid(a_gamma, a_beta) F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * ( 1 + np.cos(4 * a_gamma) ** 2) result = np.where(F1 == np.amax(F1)) a = list(zip(result[0], result[1]))[0] gamma = a[0] * step_size beta = a[1] * step_size prog = make_circuit(4) sample_shot =5200 writefile = open("../data/startQiskit_Class82.csv", "w") # prog.draw('mpl', filename=(kernel + '.png')) backend = BasicAer.get_backend('statevector_simulator') circuit1 = transpile(prog, FakeYorktown()) prog = circuit1 info = execute(prog,backend=backend, shots=sample_shot).result().get_counts() print(info, file=writefile) print("results end", file=writefile) print(circuit1.depth(), file=writefile) print(circuit1, file=writefile) writefile.close()
27.215909
118
0.634238
[ "BSD-3-Clause" ]
UCLA-SEAL/QDiff
data/p4VQE/R1/benchmark/startQiskit_Class82.py
2,395
Python
# flake8: noqa """ OpenAPI definition No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v0 Generated by: https://openapi-generator.tech """ __version__ = "0.6.0" # import ApiClient from gooddata_afm_client.api_client import ApiClient # import Configuration from gooddata_afm_client.configuration import Configuration # import exceptions from gooddata_afm_client.exceptions import OpenApiException from gooddata_afm_client.exceptions import ApiAttributeError from gooddata_afm_client.exceptions import ApiTypeError from gooddata_afm_client.exceptions import ApiValueError from gooddata_afm_client.exceptions import ApiKeyError from gooddata_afm_client.exceptions import ApiException
29.071429
124
0.829238
[ "MIT" ]
SubTachyon/gooddata-python-sdk
gooddata-afm-client/gooddata_afm_client/__init__.py
814
Python
people = 30 cars = 40 trucks = 15 if cars > people: print("We should take the cars.") elif cars < people: print("We should not take the cars.") else: print("We can't decide.") if trucks > cars: print("That's too many trucks.") elif trucks < cars: print("Maybe we could take the trucks.") else: print("We still can't decide.") if people > trucks: print("Alright, let's just take the trucks.") else: print("Fine, let's stay home then.")
24.2
50
0.621901
[ "MIT" ]
AyeThandarAung/python-exercises
ex30.py
484
Python
import RPi.GPIO as gpio
import time
from subprocess import call

PIN = 38
PRESSED = 0

gpio.setmode(gpio.BOARD)
gpio.setup(PIN, gpio.IN, pull_up_down=gpio.PUD_UP)

# Count 0.1 s polls with an integer instead of accumulating floats, so the
# "first tick of a press" comparison is exact.  Starting at 1 keeps a button
# that is already held at boot from triggering a song skip.
pressed_ticks = 1
skip_song_mode = False

try:
    while True:
        if gpio.input(PIN) == PRESSED:
            pressed_ticks += 1
            print("pressed: %.1f s" % (pressed_ticks * 0.1))
            if pressed_ticks > 10:      # held for more than one second
                call(["espeak", "-ven", "shutting down"])
            elif pressed_ticks == 1:    # first tick of a fresh press
                skip_song_mode = True
            else:
                skip_song_mode = False
        else:
            pressed_ticks = 0
            if skip_song_mode:
                call(["espeak", "-ven", "skip song"])
                skip_song_mode = False
        time.sleep(0.1)
finally:
    gpio.cleanup()
24.75
53
0.60101
[ "MIT" ]
pythononwheels/opentoni
test_taster5.py
792
Python
from setuptools import setup from os import path this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup(name='ssh_tarpit', version='0.2.4', description="SSH tarpit that slowly sends an endless banner", url='https://github.com/Snawoot/ssh-tarpit', author='Vladislav Yarmak', author_email='[email protected]', license='MIT', packages=['ssh_tarpit'], python_requires='>=3.5.3', setup_requires=[ 'wheel', ], install_requires=[ ], entry_points={ 'console_scripts': [ 'ssh-tarpit=ssh_tarpit.__main__:main', ], }, classifiers=[ "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Operating System :: OS Independent", "Development Status :: 4 - Beta", "Environment :: Console", "Environment :: No Input/Output (Daemon)", "Intended Audience :: System Administrators", "Natural Language :: English", "Topic :: Internet", "Topic :: Utilities", "Topic :: Security", ], long_description=long_description, long_description_content_type='text/markdown', zip_safe=True)
32.386364
73
0.586667
[ "Unlicense" ]
trideeindhoven/ssh-tarpit
setup.py
1,425
Python
from __future__ import absolute_import from __future__ import division from __future__ import print_function from matplotlib import pyplot as plt import tensorflow as tf import seaborn as sb import pandas as pd import numpy as np import math import time import cv2 import os tf.reset_default_graph() gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) # tip: if you run into problems with TensorBoard # clear the contents of this directory, re-run this script # then restart TensorBoard to see the result # LOGDIR = './graphs' model_frames = 64 NUM_CLASSES = 74 NUM_PIXELS = 88 * 128 TRAIN_STEPS = 0 BATCH_SIZE = 1 << 5 MODEL_ANGLE_DICT = {'000': True, '018': False, '036': False, '054': False, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False} TEST_ANGLE_DICT = {'000': False, '018': False, '036': False, '054': True, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False} LEARNING_RATE = 1e-4 DATA_PATH = 'Generated_full_data_GEI' start_time = time.time() keep_prob = 0.5 #dropout (keep probability) def del_files(path): for root, dirs, files in os.walk(path): for name in files: if name.startswith("."): os.remove(os.path.join(root, name)) print("Delete File: " + os.path.join(root, name)) def get_label(_index, num_classes): # label = np.zeros(shape=[num_classes], dtype='float32') # label[int(_index) - 1] = 1 # return label return (int(_index) - 1) def load_images_from_folder(folder, model_angle_dict, test_angle_dict): train_frames = [] train_labels = [] probe_frames = [] probe_labels = [] for i in xrange(11): train_frames.append([]) for i in xrange(11): train_labels.append([]) for i in xrange(11): probe_frames.append([]) for i in xrange(11): probe_labels.append([]) for human_id in os.listdir(os.path.join(folder, 'train')): if int(human_id) < 74: continue for angle in os.listdir(os.path.join(folder, 'train', human_id)): # if not model_angle_dict[angle]: # continue for _type in os.listdir(os.path.join(folder, 'train', human_id, angle)): img = cv2.imread(os.path.join(folder, 'train', human_id, angle, _type), 0) if img is not None: train_frames[int(angle) // 18].append(img.flatten()) train_labels[int(angle) // 18].append(get_label(human_id, 124)) for human_id in os.listdir(os.path.join(folder, 'test')): for angle in os.listdir(os.path.join(folder, 'test', human_id)): # if not test_angle_dict[angle]: # continue for _type in os.listdir(os.path.join(folder, 'test', human_id, angle)): img = cv2.imread(os.path.join(folder, 'test', human_id, angle, _type), 0) if img is not None: probe_frames[int(angle) // 18].append(img.flatten()) probe_labels[int(angle) // 18].append(get_label(human_id, 124)) return (train_frames, train_labels, probe_frames, probe_labels) del_files(DATA_PATH) (train_frames, train_labels, probe_frames, probe_labels) = load_images_from_folder(DATA_PATH, MODEL_ANGLE_DICT, TEST_ANGLE_DICT) # Define inputs with tf.name_scope('input'): images = tf.placeholder(tf.float32, [None, NUM_PIXELS], name="pixels") labels = tf.placeholder(tf.float32, [None, NUM_CLASSES], name="labels") # dropout_prob = tf.placeholder_with_default(1.0, shape=()) # Create some wrappers for simplicity def conv2d(x, W, b, strides=1): # Conv2D wrapper, with bias and relu activation x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') x = tf.nn.bias_add(x, b) return tf.nn.relu(x) def maxpool2d(x, k=2): # MaxPool2D wrapper return tf.nn.max_pool(x, 
ksize=[1, k, k, 1], strides=[1, 2, 2, 1], padding='SAME') # Create model def conv_net(x, weights, biases, dropout): # Reshape input picture x = tf.reshape(x, shape=[-1, 128, 88, 1]) # Convolution Layer conv1 = conv2d(x, weights['wc1'], biases['bc1']) # Max Pooling (down-sampling) conv1 = maxpool2d(conv1, k=2) conv1 = tf.contrib.layers.batch_norm(conv1) # Convolution Layer conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) # Max Pooling (down-sampling) conv2 = maxpool2d(conv2, k=3) conv2 = tf.contrib.layers.batch_norm(conv2) # Fully connected layer # Reshape conv2 output to fit fully connected layer input fc3 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]]) fc3 = tf.add(tf.matmul(fc3, weights['wd1']), biases['bd1']) fc3 = tf.nn.relu(fc3) # Apply Dropout # fc1 = tf.nn.dropout(fc1, dropout) # fc3 = tf.nn.dropout(fc3, dropout_prob) # # Output, class prediction fc4 = tf.add(tf.matmul(fc3, weights['fc4']), biases['fc4']) return fc3 # Store layers weight & bias initializer = tf.contrib.layers.xavier_initializer() weights = { # 7x7 conv, 1 input, 18 outputs 'wc1': tf.Variable(initializer([7, 7, 1, 18])), # 5x5 conv, 18 inputs, 45 outputs 'wc2': tf.Variable(initializer([5, 5, 18, 45])), # fully connected, 7*7*64 inputs, 1024 outputs 'wd1': tf.Variable(initializer([32*22*45, 1024])), # # 1024 inputs, 10 outputs (class prediction) 'fc4': tf.Variable(initializer([1024, NUM_CLASSES])) } biases = { 'bc1': tf.Variable(tf.random_normal([18])), 'bc2': tf.Variable(tf.random_normal([45])), 'bd1': tf.Variable(tf.random_normal([1024])), 'fc4': tf.Variable(tf.random_normal([NUM_CLASSES])) } y = conv_net(images, weights, biases, keep_prob) sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.restore(sess, "./full_tri_model/model.ckpt") print("%d frames model restored."%model_frames) print(' ', end=',') for i in xrange(11): print('%4d'%(i * 18), end=',') print_map = np.zeros(shape=(11, 11), dtype=np.float32) gallery_encoding = [] probe_encoding = [] for a in range(11): gallery_encoding.append(sess.run(y, feed_dict={images: train_frames[a]})) for a in range(11): probe_encoding.append(sess.run(y, feed_dict={images: probe_frames[a]})) for a in range(11): print('') print('%3d'%(a * 18), end=',') for b in range(11): simlarity = np.zeros(shape=[len(probe_encoding[b]), len(gallery_encoding[a])], dtype=np.float32) pred_label = np.zeros(shape=[len(probe_encoding[b])], dtype=np.int) for i in range(len(probe_encoding[b])): for j in range(len(gallery_encoding[a])): simlarity[i][j] = np.exp(-(((probe_encoding[b][i] - gallery_encoding[a][j])/1024.0)**2).sum()) # import pdb # pdb.set_trace() tmp_index = simlarity[i].argmax() pred_label[i] = train_labels[a][tmp_index] # if not (pred_label[i] == probe_labels[i]): # print(str((pred_label[i] == probe_labels[i])) + ' ' + str(pred_label[i]) + ' ' + str(probe_labels[i])) acc = np.sum(pred_label[:] == probe_labels[b][:]) # print_map[b][10 - a] = 100.0 * acc/(len(probe_labels[b])*1.0) print_map[b][a] = 100.0 * acc/(len(probe_labels[b])*1.0) print('%.2f'%(100.0 * acc/(len(probe_labels[b])*1.0)), end=',') print(print_map) grid_visualization = np.array(print_map.transpose()) grid_visualization.shape = (11, 11) sb.heatmap(grid_visualization, cmap='Oranges') plt.xticks(np.arange(11) + 0.5, xrange(0, 181, 18)) plt.yticks(np.arange(11) + 0.5, xrange(180, -1, -18)) plt.xlabel('Gallery Angle') plt.ylabel('Probe Angle') plt.show()
33.239496
172
0.628871
[ "MIT" ]
136823xuewei/gait-recognition
classification/casiab_performance.py
7,911
Python
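The nested similarity loops above are O(n*m) Python-level iterations; the kernel exp(-sum(((p - g)/1024)^2)) can be computed in one shot with NumPy broadcasting. A sketch, assuming probe and gallery are float arrays of shape (n, d) and (m, d) as in the script; note that it materializes an (n, m, d) intermediate, which is fine at these dataset sizes.

import numpy as np

def similarity_matrix(probe, gallery):
    # (n, 1, d) - (1, m, d) broadcasts to (n, m, d)
    diff = (probe[:, None, :] - gallery[None, :, :]) / 1024.0
    return np.exp(-(diff ** 2).sum(axis=2))          # shape (n, m)

def predict_labels(probe, gallery, gallery_labels):
    sim = similarity_matrix(probe, gallery)
    # nearest gallery sample by kernel similarity, as in the loop above
    return np.asarray(gallery_labels)[sim.argmax(axis=1)]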
""" Parsing time durations from strings This module provides a function that parses time durations from strings. It has been copied from the django software, which comes with the following notes: Copyright (c) Django Software Foundation and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Django nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import datetime import re standard_duration_re = re.compile( r"^" r"(?:(?P<days>-?\d+) (days?, )?)?" r"((?:(?P<hours>-?\d+):)(?=\d+:\d+))?" r"(?:(?P<minutes>-?\d+):)?" r"(?P<seconds>-?\d+)" r"(?:\.(?P<microseconds>\d{1,6})\d{0,6})?" r"$" ) # Support the sections of ISO 8601 date representation that are accepted by # timedelta iso8601_duration_re = re.compile( r"^(?P<sign>[-+]?)" r"P" r"(?:(?P<days>\d+(.\d+)?)D)?" r"(?:T" r"(?:(?P<hours>\d+(.\d+)?)H)?" r"(?:(?P<minutes>\d+(.\d+)?)M)?" r"(?:(?P<seconds>\d+(.\d+)?)S)?" r")?" r"$" ) # Support PostgreSQL's day-time interval format, e.g. "3 days 04:05:06". The # year-month and mixed intervals cannot be converted to a timedelta and thus # aren't accepted. postgres_interval_re = re.compile( r"^" r"(?:(?P<days>-?\d+) (days? ?))?" r"(?:(?P<sign>[-+])?" r"(?P<hours>\d+):" r"(?P<minutes>\d\d):" r"(?P<seconds>\d\d)" r"(?:\.(?P<microseconds>\d{1,6}))?" r")?$" ) def parse_duration(value: str) -> datetime.timedelta: """Parse a duration string and return a datetime.timedelta. Args: value (str): A time duration given as text. The preferred format for durations is '%d %H:%M:%S.%f'. This function also supports ISO 8601 representation and PostgreSQL's day-time interval format. Returns: datetime.timedelta: An instance representing the duration. 
""" match = ( standard_duration_re.match(value) or iso8601_duration_re.match(value) or postgres_interval_re.match(value) ) if match: kw = match.groupdict() days = datetime.timedelta(float(kw.pop("days", 0) or 0)) sign = -1 if kw.pop("sign", "+") == "-" else 1 if kw.get("microseconds"): kw["microseconds"] = kw["microseconds"].ljust(6, "0") if ( kw.get("seconds") and kw.get("microseconds") and kw["seconds"].startswith("-") ): kw["microseconds"] = "-" + kw["microseconds"] kw = {k: float(v) for k, v in kw.items() if v is not None} return days + sign * datetime.timedelta(**kw) # type: ignore else: raise ValueError(f"The time duration {value} cannot be parsed.") __all__ = ["parse_duration"]
36.079646
80
0.651459
[ "MIT" ]
lmenou/py-pde
pde/tools/parse_duration.py
4,077
Python
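All three accepted notations, exercised end to end:

from datetime import timedelta

assert parse_duration("3 days, 4:05:06") == timedelta(days=3, hours=4, minutes=5, seconds=6)
assert parse_duration("P3DT4H5M6S") == timedelta(days=3, hours=4, minutes=5, seconds=6)       # ISO 8601
assert parse_duration("3 days 04:05:06") == timedelta(days=3, hours=4, minutes=5, seconds=6)  # PostgreSQL interval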
class Solution:
    def twoSum(self, nums, target):
        hash_map = {}  # a {} literal initializes slightly faster than dict() in Python
        for i, x in enumerate(nums):
            if target - x in hash_map:
                return [i, hash_map[target - x]]
            hash_map[x] = i
29.222222
46
0.524715
[ "MIT" ]
Sprinter1999/Algorithm
LeetCode/Leetcode-2019Summer/Leetcode001(a+b).py
281
Python
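A quick check of the hash-map lookup: each element is visited once and its complement found in O(1).

assert sorted(Solution().twoSum([2, 7, 11, 15], 9)) == [0, 1]
assert Solution().twoSum([3, 3], 6) == [1, 0]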
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2015-2017 Lionheart Software LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import runpy try: from setuptools import setup except ImportError: from distutils.core import setup metadata_filename = "bigstore/metadata.py" metadata = runpy.run_path(metadata_filename) # http://pypi.python.org/pypi?:action=list_classifiers classifiers = [ "Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: Unix", "Operating System :: MacOS :: MacOS X", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Version Control", "Topic :: Utilities", ] setup( name='git-bigstore', description="Track big files with Git.", version=metadata['__version__'], license=metadata['__license__'], classifiers=classifiers, author=metadata['__author__'], author_email=metadata['__email__'], url="https://github.com/lionheart/git-bigstore", packages=[ 'bigstore.backends', 'bigstore', ], scripts=[ 'bin/git-bigstore', ], install_requires=[ 'future', 'gitpython<3', 'boto', 'boto3', 'python-dateutil', 'pytz', 'python-cloudfiles;python_version<="2.7"', ], )
29.287671
74
0.66464
[ "Apache-2.0" ]
lionheart/git-bigstore
setup.py
2,138
Python
# -*- coding: utf-8 -*- """ test_searchadapters ~~~~~~~~~~~~~~~~~~~ Test the Web Support Package search adapters. :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ from six import StringIO from sphinx.websupport import WebSupport from test_websupport import sqlalchemy_missing from util import rootdir, tempdir, skip_if, skip_unless_importable def teardown_module(): (tempdir / 'websupport').rmtree(True) def search_adapter_helper(adapter): settings = {'srcdir': rootdir / 'roots' / 'test-searchadapters', 'builddir': tempdir / 'websupport', 'status': StringIO(), 'warning': StringIO(), 'search': adapter} support = WebSupport(**settings) support.build() s = support.search # Test the adapters query method. A search for "Epigraph" should return # one result. results = s.query(u'Epigraph') assert len(results) == 1, \ '%s search adapter returned %s search result(s), should have been 1'\ % (adapter, len(results)) # Make sure documents are properly updated by the search adapter. s.init_indexing(changed=['markup']) s.add_document(u'markup', u'filename', u'title', u'SomeLongRandomWord') s.finish_indexing() # Now a search for "Epigraph" should return zero results. results = s.query(u'Epigraph') assert len(results) == 0, \ '%s search adapter returned %s search result(s), should have been 0'\ % (adapter, len(results)) # A search for "SomeLongRandomWord" should return one result. results = s.query(u'SomeLongRandomWord') assert len(results) == 1, \ '%s search adapter returned %s search result(s), should have been 1'\ % (adapter, len(results)) # Make sure it works through the WebSupport API support.get_search_results(u'SomeLongRandomWord') @skip_unless_importable('xapian', 'needs xapian bindings installed') @skip_if(sqlalchemy_missing, 'needs sqlalchemy') def test_xapian(): search_adapter_helper('xapian') @skip_unless_importable('whoosh', 'needs whoosh package installed') @skip_if(sqlalchemy_missing, 'needs sqlalchemy') def test_whoosh(): search_adapter_helper('whoosh')
32.614286
77
0.673237
[ "BSD-2-Clause" ]
zzqcn/sphinx-doc
tests/test_searchadapters.py
2,283
Python
from __future__ import division, print_function, absolute_import from subprocess import Popen, PIPE, STDOUT import numpy as np SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024] def gen_data(dt): arrays = {} if dt == np.float128: pg = './fftw_longdouble' elif dt == np.double: pg = './fftw_double' elif dt == np.float32: pg = './fftw_single' else: raise ValueError("unknown: %s" % dt) # Generate test data using FFTW for reference for type in [1, 2, 3, 4, 5, 6, 7, 8]: arrays[type] = {} for sz in SZ: a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT) st = [i.decode('ascii').strip() for i in a.stdout.readlines()] arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt) return arrays # generate single precision data data = gen_data(np.float32) filename = 'fftw_single_ref' # Save ref data into npz format d = {'sizes': SZ} for type in [1, 2, 3, 4]: for sz in SZ: d['dct_%d_%d' % (type, sz)] = data[type][sz] d['sizes'] = SZ for type in [5, 6, 7, 8]: for sz in SZ: d['dst_%d_%d' % (type-4, sz)] = data[type][sz] np.savez(filename, **d) # generate double precision data data = gen_data(np.float64) filename = 'fftw_double_ref' # Save ref data into npz format d = {'sizes': SZ} for type in [1, 2, 3, 4]: for sz in SZ: d['dct_%d_%d' % (type, sz)] = data[type][sz] d['sizes'] = SZ for type in [5, 6, 7, 8]: for sz in SZ: d['dst_%d_%d' % (type-4, sz)] = data[type][sz] np.savez(filename, **d) # generate long double precision data data = gen_data(np.float128) filename = 'fftw_longdouble_ref' # Save ref data into npz format d = {'sizes': SZ} for type in [1, 2, 3, 4]: for sz in SZ: d['dct_%d_%d' % (type, sz)] = data[type][sz] d['sizes'] = SZ for type in [5, 6, 7, 8]: for sz in SZ: d['dst_%d_%d' % (type-4, sz)] = data[type][sz] np.savez(filename, **d)
25.831169
77
0.577677
[ "MIT" ]
123972/PCA-nutricion
environment/lib/python3.8/site-packages/scipy/fftpack/tests/gen_fftw_ref.py
1,989
Python
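The three save blocks differ only in dtype and output name, so they could be folded into a single helper; a sketch under that observation, producing the same npz layout:

def save_ref(filename, data):
    d = {'sizes': SZ}
    for type in [1, 2, 3, 4]:
        for sz in SZ:
            d['dct_%d_%d' % (type, sz)] = data[type][sz]
    for type in [5, 6, 7, 8]:
        for sz in SZ:
            d['dst_%d_%d' % (type - 4, sz)] = data[type][sz]
    np.savez(filename, **d)

for dt, name in [(np.float32, 'fftw_single_ref'),
                 (np.float64, 'fftw_double_ref'),
                 (np.float128, 'fftw_longdouble_ref')]:
    save_ref(name, gen_data(dt))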
# coding: utf-8 import numpy as np import csv import codecs import os import glob from collections import defaultdict SPACE = " " EMPTY = " " INV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '!':4, '-':5, ';':6, ':':7, '...':8, '':0} PUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?', 4:'!', 5:'-', 6:';', 7:':', 8:'...'} REDUCED_PUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?'} REDUCED_INV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '':0} EOS_PUNCTUATION_CODES = [2,3,4,5,6,7,8] END = "<END>" UNK = "<UNK>" EMP = "<EMP>" NA = "NA" #PAUSE_FEATURE_NAME = 'pause_before' #ALL_POSSIBLE_INPUT_FEATURES = {'word', 'pos', 'pause_before', 'speech_rate_norm', 'f0_mean', 'f0_range', 'i0_mean', 'i0_range'} def pad(l, size, padding): if size >= len(l): return l + [padding] * abs((len(l)-size)) else: return l[0:size] def read_proscript(filename, add_end=False): columns = defaultdict(list) # each value in each column is appended to a list skip_columns = [] with open(filename) as f: reader = csv.DictReader(f, delimiter='|') # read rows into a dictionary format for row in reader: # read a row as {column1: value1, column2: value2,...} for (k,v) in row.items(): # go over each column name and value if not k in skip_columns: if "word" in k or "punctuation" in k or "pos" in k: columns[k].append(v) # append the value into the appropriate list else: try: columns[k].append(float(v)) # real value except ValueError: skip_columns.append(k) if add_end and not columns['word'][-1] == END: for k in columns.keys(): if "word" in k or "pos" in k: columns[k].append(END) elif "punctuation" in k: columns[k].append("") else: columns[k].append(0.0) return columns def checkArgument(argname, isFile=False, isDir=False, createDir=False): if not argname: return False else: if isFile and not os.path.isfile(argname): return False if isDir: if not os.path.isdir(argname): if createDir: print("Creating directory %s"%(argname)) os.makedirs(argname) else: return False return True def iterable_to_dict(arr): return dict((x.strip(), i) for (i, x) in enumerate(arr)) def read_vocabulary(file_name): with codecs.open(file_name, 'r', 'utf-8') as f: return iterable_to_dict(f.readlines()) def to_array(arr, dtype=np.int32): # minibatch of 1 sequence as column return np.array([arr], dtype=dtype).T def create_pause_bins(): bins = np.arange(0, 1, 0.05) bins = np.concatenate((bins, np.arange(1, 2, 0.1))) bins = np.concatenate((bins, np.arange(2, 5, 0.2))) bins = np.concatenate((bins, np.arange(5, 10, 0.5))) bins = np.concatenate((bins, np.arange(10, 20, 1))) return bins def create_pause_bins9(): bins = np.array([ 0. , 0.25, 0.5 , 0.75, 1. , 2. , 3. , 4. , 5. 
]) return bins def create_pause_bins2(): return [0.0, 1.14] def create_pause_bins3(): return [0.0, 0.2, 1.0] def create_semitone_bins(): bins = np.arange(-20, -10, 1) bins = np.concatenate((bins, np.arange(-10, -5, 0.5))) bins = np.concatenate((bins, np.arange(-5, 0, 0.25))) bins = np.concatenate((bins, np.arange(0, 5, 0.25))) bins = np.concatenate((bins, np.arange(5, 10, 0.5))) bins = np.concatenate((bins, np.arange(10, 20, 1))) return bins def levels_from_file(filename): with open(filename) as f: lst = [float(line.rstrip()) for line in f] return lst def get_level_maker(levels_file): levels_list = levels_from_file(levels_file) def get_level(value): level = 0 for level_bin in levels_list: if value > level_bin: level +=1 else: return level return level no_of_levels = len(levels_list) + 1 return get_level, no_of_levels #OBSOLETE def convert_value_to_level_sequence(value_sequence, bins): levels = [] for value in value_sequence: level = 0 for bin_no, bin_upper_limit in enumerate(bins): if value > bin_upper_limit: level += 1 else: break levels.append(level) return levels def reducePuncCode(puncCode): if puncCode in [4, 5, 6, 7, 8]: #period return 2 else: return puncCode def reducePunc(punc): if punc and not punc.isspace(): puncCode = INV_PUNCTUATION_CODES[punc] reducedPuncCode = reducePuncCode(puncCode) return PUNCTUATION_VOCABULARY[reducedPuncCode] else: return punc
27.870968
128
0.658102
[ "MIT" ]
LisanneWiengarten/Punctuation
utilities.py
4,320
Python
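The closure returned by get_level_maker buckets a continuous value by counting how many bin boundaries it exceeds. The same logic inline, with a hypothetical three-boundary bin list (three boundaries give four levels):

def get_level(value, levels_list):
    level = 0
    for upper in levels_list:
        if value > upper:
            level += 1
        else:
            return level
    return level

assert get_level(-0.1, [0.0, 0.2, 1.0]) == 0
assert get_level(0.5, [0.0, 0.2, 1.0]) == 2
assert get_level(5.0, [0.0, 0.2, 1.0]) == 3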
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php def asbool(obj): if isinstance(obj, str): obj = obj.strip().lower() if obj in ['true', 'yes', 'on', 'y', 't', '1']: return True elif obj in ['false', 'no', 'off', 'n', 'f', '0']: return False else: raise ValueError( "String is not true/false: %r" % obj) return bool(obj) def aslist(obj, sep=None, strip=True): if isinstance(obj, str): lst = obj.split(sep) if strip: lst = [v.strip() for v in lst] return lst elif isinstance(obj, (list, tuple)): return obj elif obj is None: return [] else: return [obj]
28.133333
84
0.5391
[ "Apache-2.0" ]
Tezar/Assigment-generator
ita/web/beaker/converters.py
844
Python
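Behaviour at a glance; note that list and tuple inputs pass through aslist unchanged:

assert asbool(" Yes ") is True and asbool("off") is False
assert asbool(1) is True
assert aslist("a, b , c", sep=",") == ["a", "b", "c"]
assert aslist(None) == []
assert aslist(("x",)) == ("x",)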
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Trial Model"""
from __future__ import (absolute_import, print_function,
                        division, unicode_literals)

import os

from sqlalchemy import Column, Integer, Text, TIMESTAMP
from sqlalchemy import ForeignKeyConstraint, select, func, distinct

from ...utils.formatter import PrettyLines
from ...utils.prolog import PrologDescription, PrologTrial, PrologNullableRepr
from ...utils.prolog import PrologTimestamp, PrologAttribute, PrologRepr
from ...utils.prolog import PrologNullable

from .. import relational, content, persistence_config

from .base import AlchemyProxy, proxy_class, query_many_property, proxy_gen
from .base import one, many_ref, many_viewonly_ref, backref_many, is_none
from .base import proxy

from .trial_prolog import TrialProlog
from .trial_dot import TrialDot

from .module import Module
from .dependency import Dependency
from .activation import Activation
from .head import Head
from .graphs.trial_graph import TrialGraph
from .graphs.dependency_graph import DependencyConfig, DependencyFilter
from .graphs.dependency_graph import PrologVisitor


@proxy_class  # pylint: disable=too-many-public-methods
class Trial(AlchemyProxy):
    """Represent a trial

    Initialize it by passing a trial reference:
        trial = Trial(2)

    There are four visualization modes for the graph:
        tree: activation tree without any filters
            trial.graph.mode = 0
        no match: tree transformed into a graph by the addition of sequence and
                  return edges and removal of intermediate call edges
            trial.graph.mode = 1
        exact match: calls are only combined when all the sub-calls match
            trial.graph.mode = 2
        namespace: calls are combined without considering the sub-calls
            trial.graph.mode = 3

    You can change the graph width and height by the variables:
        trial.graph.width = 600
        trial.graph.height = 400
    """

    __tablename__ = "trial"
    __table_args__ = (
        ForeignKeyConstraint(["inherited_id"], ["trial.id"],
                             ondelete="RESTRICT"),
        ForeignKeyConstraint(["parent_id"], ["trial.id"], ondelete="SET NULL"),
        {"sqlite_autoincrement": True},
    )
    id = Column(Integer, primary_key=True)  # pylint: disable=invalid-name
    start = Column(TIMESTAMP)
    finish = Column(TIMESTAMP)
    script = Column(Text)
    code_hash = Column(Text)
    arguments = Column(Text)
    command = Column(Text)
    inherited_id = Column(Integer, index=True)
    parent_id = Column(Integer, index=True)
    run = Column(Integer)
    docstring = Column(Text)

    inherited = one(
        "Trial", backref="bypass_children", viewonly=True,
        remote_side=[id], primaryjoin=(id == inherited_id)
    )
    parent = one(
        "Trial", backref="children", viewonly=True,
        remote_side=[id], primaryjoin=(id == parent_id)
    )

    function_defs = many_ref("trial", "FunctionDef")
    module_dependencies = many_ref("trials", "Dependency")
    dmodules = many_ref("trials", "Module", secondary=Dependency.t)
    environment_attrs = many_ref("trial", "EnvironmentAttr")
    activations = many_ref("trial", "Activation", order_by=Activation.m.start)
    file_accesses = many_viewonly_ref("trial", "FileAccess")
    objects = many_viewonly_ref("trial", "Object")
    object_values = many_viewonly_ref("trial", "ObjectValue")
    variables = many_viewonly_ref("trial", "Variable")
    variable_usages = many_viewonly_ref("trial", "VariableUsage")
    variable_dependencies = many_viewonly_ref("trial", "VariableDependency")
    tags = many_ref("trial", "Tag")

    bypass_children = backref_many("bypass_children")  # Trial.inherited
    children = 
backref_many("children") # Trial.parent @query_many_property def local_modules(self): """Load local modules. Return SQLAlchemy query""" return self.modules.filter( # pylint: disable=no-member Module.m.path.like("%{}%".format(persistence_config.base_path))) @query_many_property def modules(self): """Load modules. Return SQLAlchemy query""" if self.inherited: return self.inherited.modules return self.dmodules @query_many_property def dependencies(self): """Load modules. Return SQLAlchemy query""" if self.inherited: return self.inherited.dependencies return self.module_dependencies @query_many_property def initial_activations(self): """Return initial activation as a SQLAlchemy query""" return self.activations.filter(is_none(Activation.m.caller_id)) DEFAULT = { "dependency_config.show_blackbox_dependencies": False, "dot.format": "png", "graph.width": 500, "graph.height": 500, "graph.mode": 3, "graph.use_cache": True, "prolog.use_cache": True, } REPLACE = { "dependency_config_show_blackbox_dependencies": "dependency_config.show_blackbox_dependencies", "dot_format": "dot.format", "graph_width": "graph.width", "graph_height": "graph.height", "graph_mode": "graph.mode", "graph_use_cache": "graph.use_cache", "prolog_use_cache": "prolog.use_cache", } prolog_description = PrologDescription("trial", ( PrologTrial("id"), PrologTimestamp("start"), PrologTimestamp("finish"), PrologRepr("script"), PrologRepr("code_hash"), PrologRepr("command"), PrologNullable("inherited_id", link="trial.id"), PrologNullable("parent_id", link="trial.id"), PrologAttribute("run"), PrologNullableRepr("docstring"), ), description=( "informs that a given *script* with *docstring*,\n" "and content *code_hash*,\n" "executed during a time period from *start*" "to *finish*,\n" "using noWokflow's *command*,\n" "that generated a trial *id*.\n" "This trial uses modules from *inherited_id*,\n" "is based on *parent_id*,\n" "and might be a *run* or a backup trial." 
    ))

    def __init__(self, *args, **kwargs):
        if args and isinstance(args[0], relational.base):
            obj = args[0]
            trial_ref = obj.id
        elif args:
            trial_ref = kwargs.get("trial_ref", args[0])
        else:
            trial_ref = kwargs.get("trial_ref", None)

        # Check if it is a new trial or a query
        script = kwargs.get("trial_script", None)

        if "use_cache" in kwargs:
            cache = kwargs["use_cache"]
            kwargs["graph_use_cache"] = kwargs.get("graph_use_cache", cache)
            kwargs["prolog_use_cache"] = kwargs.get("prolog_use_cache", cache)

        session = relational.session
        if not trial_ref or trial_ref == -1:
            obj = Trial.last_trial(script=script, session=session)
            if "graph_use_cache" not in kwargs:
                kwargs["graph_use_cache"] = False
            if "prolog_use_cache" not in kwargs:
                kwargs["prolog_use_cache"] = False
        else:
            obj = Trial.load_trial(trial_ref, session=session)

        if obj is None:
            raise RuntimeError("Trial {} not found".format(trial_ref))

        super(Trial, self).__init__(obj)
        #self._store_pk(obj)
        #self._restore_instance()

        self.dependency_config = DependencyConfig()
        self.dependency_filter = DependencyFilter(self)
        self.graph = TrialGraph(self)
        self.prolog = TrialProlog(self)
        self.dot = TrialDot(self)
        self.initialize_default(kwargs)
        self._prolog_visitor = None

    @property
    def prolog_variables(self):
        """Return filtered prolog variables"""
        if not self._prolog_visitor:
            self.dependency_filter.run()
            self._prolog_visitor = PrologVisitor(self.dependency_filter)
            self._prolog_visitor.visit(self.dependency_filter.main_cluster)
        return self._prolog_visitor

    @property
    def script_content(self):
        """Return the "main" script content of the trial"""
        return PrettyLines(
            content.get(self.code_hash)
            .decode("utf-8").split("\n"))

    @property
    def finished(self):
        """Check if trial has finished"""
        return bool(self.finish)

    @property
    def status(self):
        """Check trial status

        Possible statuses: finished, unfinished, backup"""
        if not self.run:
            return "backup"
        return "finished" if self.finished else "unfinished"

    @property
    def duration(self):
        """Calculate trial duration. Return microseconds"""
        if self.finish:
            return int((self.finish - self.start).total_seconds() * 1000000)
        return 0

    @property
    def duration_text(self):
        """Calculate trial duration.
Return formatted str""" if self.finish: return str(self.finish - self.start) return "None" @property def environment(self): """Return dict: environment variables -> value""" return {e.name: e.value for e in self.environment_attrs} def versioned_files(self, skip_script=False, skip_local=False, skip_access=False): """Find first files accessed in a trial Return map with relative path -> (code_hash, type) Possible types: script, module, access """ files = {} def add(path, info): """Add file to dict""" if os.path.isabs(path): if not persistence_config.base_path in path: return path = os.path.relpath(path, persistence_config.base_path) files[path] = info if not skip_script: add(self.script, {"code_hash": self.code_hash, "type": "script"}) if not skip_local: for module in self.local_modules: # pylint: disable=not-an-iterable add(module.path, { "code_hash": module.code_hash, "type": "module", "name": module.name }) if not skip_access: for faccess in reversed(list(self.file_accesses)): add(faccess.name, { "code_hash": faccess.content_hash_before, "type": "access", }) return files def iterate_accesses(self, path=None): """Iterate on all access to a path""" if not path or self.script.endswith(path): yield self.script, {"code_hash": self.code_hash, "type": "script"} for module in self.local_modules: # pylint: disable=not-an-iterable if not path or module.path.endswith(path): yield module.path, { "code_hash": module.code_hash, "type": "module", "name": module.name } for faccess in list(self.file_accesses): if not path or faccess.name.endswith(path): yield faccess.name, { "code_hash": faccess.content_hash_before, "type": "access", } yield faccess.name, { "code_hash": faccess.content_hash_after, "type": "access", } def create_head(self): """Create head for this trial""" session = relational.make_session() session.query(Head.m).filter(Head.m.script == self.script).delete() # pylint: disable=no-member session.add(Head.m(trial_id=self.id, script=self.script)) # pylint: disable=no-member, not-callable session.commit() # pylint: disable=no-member def query(self, query): """Run prolog query""" return self.prolog.query(query) def _ipython_display_(self): """Display history graph""" if hasattr(self, "graph"): # pylint: disable=protected-access return self.graph._ipython_display_() from IPython.display import display display({ 'text/plain': 'Trial {}'.format(self.id) }, raw=True) def show(self, _print=print): """Print trial information""" _print("""\ Id: {t.id} Inherited Id: {t.inherited_id} Script: {t.script} Code hash: {t.code_hash} Start: {t.start} Finish: {t.finish} Duration: {t.duration_text}\ """.format(t=self)) def __repr__(self): return "Trial({})".format(self.id) @classmethod # query def distinct_scripts(cls): """Return a set with distinct scripts""" return {s[0].rsplit("/", 1)[-1] for s in relational.session.query(distinct(cls.m.script))} @classmethod # query def reverse_trials(cls, limit, session=None): """Return a generator with <limit> trials ordered by start time desc""" session = session or relational.session return proxy_gen( session.query(cls.m) .order_by(cls.m.start.desc()) .limit(limit) ) @classmethod # query def last_trial(cls, script=None, parent_required=False, session=None): """Return last trial according to start time Keyword arguments: script -- specify the desired script (default=None) parent_required -- valid only if script exists (default=False) """ model = cls.m session = session or relational.session trial = ( session.query(model) .filter(model.start.in_( 
select([func.max(model.start)]) .where(model.script == script) )) ).first() if trial or parent_required: return trial return ( session.query(model) .filter(model.start.in_( select([func.max(model.start)]) )) ).first() @classmethod # query def find_by_name_and_time(cls, script, timestamp, trial=None, session=None): """Return the first trial according to script and timestamp Arguments: script -- specify the desired script timestamp -- specify the start of finish time of trial Keyword Arguments: trial -- limit query to a specific trial """ model = cls.m session = session or relational.session query = ( session.query(model) .filter( (model.script == script) & ( model.start.like(timestamp + "%") | model.finish.like(timestamp + "%") ) ).order_by(model.start) ) if trial: query = query.filter(model.id == trial) return proxy(query.first()) @classmethod # query def load_trial(cls, trial_ref, session=None): """Load trial by trial reference Find reference on trials id and tags name """ from .tag import Tag # avoid circular import session = session or relational.session return ( session.query(cls.m) .outerjoin(Tag.m) .filter((cls.m.id == trial_ref) | (Tag.m.name == trial_ref)) ).first() @classmethod # query def load_parent(cls, script, remove=True, parent_required=False, session=None): """Load head trial by script Keyword arguments: remove -- remove from head, after loading (default=True) parent_required -- valid only if script exists (default=False) session -- specify session for loading (default=relational.session) """ session = session or relational.session head = Head.load_head(script, session=session) if head: trial = head.trial if remove: Head.remove(head.id, session=relational.make_session()) elif not head: trial = cls.last_trial( script=script, parent_required=parent_required, session=session) return proxy(trial) @classmethod # query def fast_last_trial_id(cls, session=None): """Load last trial id that did not bypass modules Compile SQLAlchemy core query into string for optimization Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session if not hasattr(cls, "_last_trial_id"): ttrial = cls.t _query = ( select([ttrial.c.id]).where(ttrial.c.start.in_( select([func.max(ttrial.c.start)]) .select_from(ttrial) .where(is_none(ttrial.c.inherited_id)) )) ) cls.last_trial_id = str(_query) an_id = session.execute( cls.last_trial_id).fetchone() if not an_id: raise RuntimeError( "Not able to bypass modules check because no previous trial " "was found" ) return an_id[0] @classmethod # query def fast_update(cls, trial_id, finish, docstring, session=None): """Update finish time of trial Use core sqlalchemy Arguments: trial_id -- trial id finish -- finish time as a datetime object Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session ttrial = cls.t session.execute( ttrial.update() .values(finish=finish, docstring=docstring) .where(ttrial.c.id == trial_id) ) session.commit() @classmethod # query def store(cls, start, script, code_hash, arguments, bypass_modules, # pylint: disable=too-many-arguments command, run, docstring, session=None): """Create trial and assign a new id to it Use core sqlalchemy Arguments: start -- trial start time script -- script name code_hash -- script hash code arguments -- trial arguments bypass_modules -- whether it captured modules or not command -- the full command line with noWorkflow parametes run -- trial created by the run command 
Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session # ToDo: use core query parent = cls.load_parent(script, parent_required=True) parent_id = parent.id if parent else None inherited_id = None if bypass_modules: inherited_id = cls.fast_last_trial_id() ttrial = cls.__table__ result = session.execute( ttrial.insert(), {"start": start, "script": script, "code_hash": code_hash, "arguments": arguments, "command": command, "run": run, "inherited_id": inherited_id, "parent_id": parent_id, "docstring": docstring}) tid = result.lastrowid session.commit() return tid @classmethod # query def all(cls, session=None): """Return all trials Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session return proxy_gen(session.query(cls.m)) def match_status(self, status): """Check if trial statuses matches """ if status == "*": return True return self.status == status def match_script(self, script): """Check if trial scripts matches """ if script == "*": return True return self.script == script @property def str_start(self): """Return start date as string""" return str(self.start) @property def str_finish(self): """Return start date as string""" return str(self.finish) @classmethod # query def count(cls, session=None): """Count number of trials on database """ session = session or relational.session return session.query(cls.m).count()
avg_line_length: 35.357504
max_line_length: 122
alphanum_fraction: 0.591549
licenses: [ "MIT" ]
repository_name: raffaelfoidl/noworkflow
path: capture/noworkflow/now/persistence/models/trial.py
size: 20,967
lang: Python
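The Trial class above doubles as an ORM row and a query facade. A minimal usage sketch (not part of trial.py), assuming a noWorkflow database exists in the working directory and that the import path follows the repository layout recorded above:

from noworkflow.now.persistence.models.trial import Trial

trial = Trial()             # no trial_ref given: falls back to Trial.last_trial()
trial.show()                # prints id, script, code hash, start/finish, duration
print(trial.status)         # "finished", "unfinished" or "backup"
print(trial.duration_text)  # formatted finish - start, or "None"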
import Tkinter as tk


class Combobox(tk.Label):
    def __init__(self, master, choices=None, default=None, direction="down",
                 arrowimage="default", **kwargs):
        style = {"relief": "groove", "bg": "white"}
        style.update(kwargs)
        tk.Label.__init__(self, master, **style)

        # options
        if direction not in ("down", "up"):
            raise ValueError("Direction must be either down or up")
        self.direction = direction
        # avoid a mutable default argument
        self.choices = choices if choices is not None else []

        # entry
        self.entry = tk.Entry(self, bg=style["bg"], borderwidth=0)
        self.entry.pack(side="left", fill="y")
        if default is not None:
            self.entry.insert(0, default)

        # dropdown arrow
        if arrowimage == "default":
            arrowimage = tk.PhotoImage(file="dropdown.gif")
        else:
            pass  # image should be passed as a PhotoImage
        self.arrow = tk.Label(self, bg=style["bg"], image=arrowimage)
        # keep a reference so the image is not garbage-collected
        self.arrow.img = arrowimage
        self.arrow.pack(side="right")
        self.arrow.bind("<Button-1>", self.dropdown)

    def dropdown(self, event=None):
        self.arrow["relief"] = "sunken"
        self.entry.focus_force()
        self.entry.select_range(0, tk.END)
        menu = tk.Menu(self.entry, tearoff=0, bg="white")

        def changeentry(choice):
            self.entry.delete(0, tk.END)
            self.entry.insert(0, choice)
            self.rollup()

        if self.direction == "down":
            choices = self.choices
        elif self.direction == "up":
            choices = list(reversed(self.choices))
        for choice in choices:
            menu.add_command(label=repr(choice).ljust(30),
                             command=lambda x=choice: changeentry(x))

        x = self.entry.winfo_rootx()
        if self.direction == "down":
            y = self.entry.winfo_rooty() + self.entry.winfo_height()
        elif self.direction == "up":
            y = self.entry.winfo_rooty() - menu.yposition(0)  # menu.winfo_height()
        menu.post(x, y)

    def rollup(self, event=None):
        self.arrow["relief"] = "flat"


if __name__ == "__main__":
    win = tk.Tk()
    OPTIONS = range(20)
    cbox = Combobox(win, choices=OPTIONS, default=12, direction="down")
    cbox.pack(side="left")
    cbox2 = Combobox(win, choices=OPTIONS, default=24, direction="up")
    cbox2.pack(side="left")
    win.mainloop()
avg_line_length: 35.953846
max_line_length: 107
alphanum_fraction: 0.595208
licenses: [ "MIT" ]
repository_name: karimbahgat/AutoMap
path: dependencies/generate maps/pythongis/app/tk2/_othermisc/dropdown_works.py
size: 2,337
lang: Python
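A hypothetical variation on the file's own __main__ demo: supplying the arrow image explicitly instead of relying on the bundled "dropdown.gif" ("arrow.gif" below is a placeholder filename, not something the widget ships with):

root = tk.Tk()
img = tk.PhotoImage(file="arrow.gif")  # placeholder path; any Tkinter PhotoImage works
box = Combobox(root, choices=["red", "green", "blue"], default="red",
               arrowimage=img)
box.pack(side="left")
root.mainloop()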
from django.test import TestCase

from ..company.models import Company
from ..project.models import *
from .models import *


def create_contact_data(self):
    self.company = Company.objects.create(schema="test", name="Test")
    self.company.activate()
    self.project = Project.objects.create(name="my project")
    self.contact = Contact.objects.create(first_name="Ludwig",
                                          last_name="von Mises")


class ContactProjectTests(TestCase):
    def setUp(self):
        create_contact_data(self)

    def test_no_association(self):
        self.assertEqual(0, len(self.contact.projects.all()))
        self.assertEqual(0, len(self.contact.project_contacts.all()))
        self.assertEqual(0, len(self.project.contacts.all()))
        self.assertEqual(0, len(self.project.project_contacts.all()))

    def test_customer_association(self):
        ProjectContact.objects.create(
            project=self.project,
            contact=self.contact,
            association=ProjectContact.CUSTOMER,
        )
        self.assertEqual(1, len(self.contact.projects.all()))
        self.assertEqual(1, len(self.project.contacts.all()))
        pc = ProjectContact.objects.get(project=self.project)
        self.assertEqual(ProjectContact.CUSTOMER, pc.association)


class BillableContactTests(TestCase):
    def setUp(self):
        self.company = Company.objects.create(schema="test", name="Test")
        self.company.activate()
        self.project = Project.objects.create(name="my project")
        self.pc1 = ProjectContact.objects.create(
            project=self.project,
            contact=Contact.objects.create(first_name="A 1", last_name="B 1"),
        )
        self.pc2 = ProjectContact.objects.create(
            project=self.project,
            contact=Contact.objects.create(first_name="A 2", last_name="B 2"),
        )

    def test_no_billable_set(self):
        self.assertEqual(
            0, self.project.project_contacts.filter(is_billable=True).count()
        )

    def test_billable_set(self):
        self.pc1.is_billable = True
        self.pc1.save()
        self.assertEqual(
            1, self.project.project_contacts.filter(is_billable=True).count()
        )

    def test_only_one_contact_can_be_billable(self):
        self.pc1.is_billable = True
        self.pc1.save()
        self.assertEqual(
            1, self.project.project_contacts.filter(is_billable=True).count()
        )
        self.assertEqual(
            self.pc1, self.project.project_contacts.filter(is_billable=True).get()
        )
        self.pc2.is_billable = True
        self.pc2.save()
        self.assertEqual(
            1, self.project.project_contacts.filter(is_billable=True).count()
        )
        self.assertEqual(
            self.pc2, self.project.project_contacts.filter(is_billable=True).get()
        )
avg_line_length: 35.5
max_line_length: 85
alphanum_fraction: 0.65493
licenses: [ "BSD-3-Clause" ]
repository_name: systori/systori
path: systori/apps/directory/test_models.py
size: 2,840
lang: Python
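The BillableContactTests above imply that saving a billable ProjectContact demotes any previously billable contact on the same project. The actual systori model is not shown in this record; the following is a minimal sketch of one way to enforce that invariant, with field names inferred from the tests (the CUSTOMER association field is omitted):

from django.db import models


class ProjectContact(models.Model):
    project = models.ForeignKey("project.Project", on_delete=models.CASCADE,
                                related_name="project_contacts")
    contact = models.ForeignKey("Contact", on_delete=models.CASCADE,
                                related_name="project_contacts")
    is_billable = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        if self.is_billable:
            # demote any other billable contact on the same project
            ProjectContact.objects.filter(
                project=self.project, is_billable=True
            ).exclude(pk=self.pk).update(is_billable=False)
        super(ProjectContact, self).save(*args, **kwargs)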
#------------------Bombermans Team---------------------------------# # Author : B3mB4m # Concat : [email protected] # Project : https://github.com/b3mb4m/Shellsploit # LICENSE : https://github.com/b3mb4m/Shellsploit/blob/master/LICENSE #------------------------------------------------------------------# import sys import os from .core.color import * from re import findall from .core.Comp import tab from lib.base.framework import ShellsploitFramework if sys.version_info.major >= 3: raw_input = input class B3mB4m(ShellsploitFramework): def __init__(self): ShellsploitFramework.__init__(self) self.argvlist = ["None", "None", "None", "None"] self.disassembly = "None" self.mycache = "None" def control(self, string): bash = bcolors.OKBLUE + bcolors.UNDERLINE + "ssf" + bcolors.ENDC bash += ":" bash += bcolors.RED + string + bcolors.ENDC bash += bcolors.OKBLUE + " > " + bcolors.ENDC try: terminal = raw_input(bash) except KeyboardInterrupt: B3mB4m.exit("\n[*] (Ctrl + C ) Detected, Trying To Exit ...") # Injectors if string[:9] == "injectors": tab.completion("injectors") if terminal[:4] == "help": from .core.help import injectorhelp injectorhelp() self.control(string) elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass # elif terminal[:9] == "need help": # import XX # print youtubelink for this module elif terminal[:4] == "exit": B3mB4m.exit("\nThanks for using shellsploit !\n") elif terminal[:4] == "pids": B3mB4m.pids("wholelist") self.control(string) elif terminal[:6] == "getpid": B3mB4m.pids(None, terminal[7:]) self.control(string) elif terminal[:5] == "clear": B3mB4m.clean() self.control(string) elif terminal[:5] == "unset": if string in B3mB4m.bfdlist(): if terminal[6:] == "exe" or terminal[6:] == "file": self.argvlist[0] = "None" elif terminal[6:] == "host": self.argvlist[1] = "None" elif terminal[6:] == "port": self.argvlist[2] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/tLsInjectorDLL": if terminal[6:] == "exe": self.argvlist[0] = "None" elif terminal[6:] == "dll": self.argvlist[1] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/CodecaveInjector": if terminal[6:] == "exe": self.argvlist[0] = "None" elif terminal[6:] == "shellcode": self.argvlist[1] = "None" else: if terminal[6:] == "pid": self.argvlist[0] = "None" elif terminal[6:] == "shellcode": self.argvlist[1] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) elif terminal[:3] == "set": if string in B3mB4m.bfdlist(): if terminal[4:7] == "exe" or terminal[4:8] == "file": self.argvlist[0] = terminal[9:] elif terminal[4:8] == "host": self.argvlist[1] = terminal[9:] elif terminal[4:8] == "port": self.argvlist[2] = terminal[9:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/tLsInjectorDLL": if terminal[4:7] == "exe": self.argvlist[0] = terminal[8:] elif terminal[4:7] == "dll": self.argvlist[1] = terminal[8:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/CodecaveInjector": if terminal[4:7] == "exe": self.argvlist[0] = terminal[8:] elif terminal[4:13] == "shellcode": 
self.argvlist[1] = terminal[14:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) else: if terminal[4:7] == "pid": self.argvlist[0] = terminal[8:] elif terminal[4:13] == "shellcode": if ".txt" in terminal[14:]: if os.path.isfile(terminal[14:]): with open(terminal[14:], "r") as shellcode: cache = shellcode.readlines() db = "" for x in database: db += x.strip().replace('"', "").replace('+', "").strip() self.argvlist[1] = db else: print(bcolors.RED + bcolors.BOLD + "\nFile can't find, please try with full path.\n" + bcolors.ENDC) self.control(string) else: self.argvlist[1] = terminal[14:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) elif terminal[:14] == "show shellcode": if string in B3mB4m.bfdlist(): print("This option not available for this module.") self.control(string) elif string == "injectors/Windowsx86/tLsInjectorDLL": self.control(string) else: if self.argvlist[1] != "None": B3mB4m.prettyout(self.argvlist[1]) else: print("\nYou must set shellcode before this ..\b") self.control(string) elif terminal[:12] == "show options": from .core.Injectoroptions import controlset if string in B3mB4m.bfdlist(): controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) self.control(string) else: if string != "injectors/Windows/x86/tLsInjectorDLL": if self.argvlist[1] != "None": self.mycache = "process" controlset(string, self.argvlist[0], self.mycache) self.control(string) controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif terminal[:5] == "clear": B3mB4m.clean() self.control(string) elif terminal[:2] == "os": B3mB4m.oscommand(terminal[3:]) self.control(string) elif terminal[:6] == "inject": if self.argvlist[0] == None or self.argvlist[1] == None: print("\nYou must set pid/shellcode before inject !\n") self.control(string) if string == "injectors/Linux86/ptrace": from .inject.menager import linux86ptrace linux86ptrace(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Linux64/ptrace": from .inject.menager import linux64ptrace linux64ptrace(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/byteman": from .inject.menager import windows windows(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/x86/tLsInjectorDLL": from .inject.menager import winx86tLsDLL winx86tLsDLL(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/x86/CodecaveInjector": from .inject.menager import winx86Codecave winx86Codecave(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/Dllinjector": from .inject.menager import winDLL winDLL(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/BFD/Patching": from .inject.menager import winBFD winBFD(self.argvlist[0], self.argvlist[1], int(self.argvlist[2])) # elif string == "injectors/MacOSX/BFD/Patching": # from .inject.menager import MacBFD # MacBFD( FILE, HOST, PORT) # elif string == "injectors/Linux/BFD/Patching": # from .inject.menager import LinuxBFD # LinuxBFD( FILE, HOST, PORT) # elif string == "injectors/Linux/ARM/x86/BFD/Patching": # from .inject.menager import LinuxARMx86BFD # LinuxARMx86BFD( FILE, HOST, PORT) # elif string == "FreeBSD/x86/BFD/Patching": # from .inject.menager import FreeBSDx86 # FreeBSDx86( FILE, HOST, PORT) self.control(string) # elif terminal[:7] == "extract": # Future option # Make it executable 
(Dynamic virus land) # from bla bla import executable # generator() elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) # Backdoors elif string[:9] == "backdoors": tab.completion("backdoors") if terminal[:4] == "help": from .core.help import backdoorshelp backdoorshelp() self.control(string) elif terminal[:4] == "exit": B3mB4m.exit("\nThanks for using shellsploit !\n") elif terminal[:2] == "os": B3mB4m.oscommand(terminal[3:]) self.control(string) elif terminal[:12] == "show options": from .core.SHELLoptions import controlset controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif terminal[:5] == "unset": if terminal[6:] == "lhost": self.argvlist[0] = "None" elif terminal[6:] == "lport": self.argvlist[1] = "None" # elif terminal[6:] == "encoder": # self.argvlist[2] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) elif terminal[:3] == "set": if terminal[4:9].lower() == "lhost": self.argvlist[0] = terminal[10:] elif terminal[4:9].lower() == "lport": self.argvlist[1] = terminal[10:] # elif terminal[4:11].lower() == "encoder" # self.argvlist[2] = terminal[11:] else: print(bcolors.RED + bcolors.BOLD + "This option is not available." + bcolors.ENDC) self.control(string) elif terminal[:8] == "generate": from .Session.generator import process # Custom output path will be add .. if self.argvlist[0] == "None" or self.argvlist[1] == "None": print("\nSet options before generate payload.\n") self.control(string) else: process(data=string, HOST=self.argvlist[0], PORT=self.argvlist[1], ENCODER=False, logger=True) self.control(string) elif terminal[:5] == "clear": B3mB4m.clean() self.control(string) elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) # Shellcodes else: tab.completion("shellcodes") if terminal[:4] == "help": # if terminal[5:11] == "output": # from Outputs.exehelp import help # print help() # self.control( string) from .core.help import shellcodehelp shellcodehelp() self.control(string) elif terminal[:2] == "os": B3mB4m.oscommand(terminal[3:]) self.control(string) elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass elif terminal[:4] == "exit": B3mB4m.exit("\nThanks for using shellsploit !\n") elif terminal[:10] == "whatisthis": from .core.whatisthis import whatisthis if "egg" in string: message = "Egg-hunt" elif "tcp" in string or "reverse" in string or "netcat" in string: message = "Remote" elif "download" in string: message = "Download and execute" else: message = "Local" # Add special part for particul whatisthis(message) self.control(string) elif terminal[:5] == "unset": if terminal[6:] == "encoder": self.argvlist[0] = "None" elif terminal[6:] == "iteration": self.argvlist[1] = "None" elif terminal[6:] == "file": if string in B3mB4m.readlist(): self.argvlist[2] = "None" else: B3mB4m.invalidcommand() elif terminal[6:] == "port": if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist(): self.argvlist[2] = "None" else: Base.invalidcommand() elif terminal[6:] == "command": if string in B3mB4m.execlist(): self.argvlist[2] = "None" else: 
B3mB4m.invalidcommand() elif terminal[6:] == "link": if string in B3mB4m.downloadandexecutelist(): self.argvlist[2] = "None" else: B3mB4m.invalidcommand() elif terminal[6:] == "filename": if string in B3mB4m.downloadandexecutelist(): self.argvlist[3] = "None" else: B3mB4m.invalidcommand() elif terminal[6:] == "host": if string in B3mB4m.reversetcplist(): self.argvlist[3] = "None" else: B3mB4m.invalidcommand() else: B3mB4m.invalidcommand() self.control(string) elif terminal[:3] == "set": if terminal[4:8] == "file": if string in B3mB4m.readlist(): self.argvlist[2] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:8] == "port": if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist(): self.argvlist[2] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:11] == "command": if string in B3mB4m.execlist(): self.argvlist[2] = terminal[12:] else: B3mB4m.invalidcommand() elif terminal[4:8] == "link": if string in B3mB4m.downloadandexecutelist(): self.argvlist[2] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:11] == "message": if string in B3mB4m.messageboxlist(): self.argvlist[2] = terminal[12:] else: B3mB4m.invalidcommand() elif terminal[4:8] == "host": if string in B3mB4m.reversetcplist(): self.argvlist[3] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:12] == "filename": if string in B3mB4m.downloadandexecutelist(): self.argvlist[3] = terminal[13:] else: B3mB4m.invalidcommand() elif terminal[4:11] == "encoder": from .core.lists import encoders if terminal[12:] not in encoders(): print("This encoder not in list !") self.control(string) self.argvlist[0] = terminal[12:] elif terminal[4:13] == "iteration": self.argvlist[1] = terminal[14:] else: B3mB4m.invalidcommand() self.control(string) elif terminal[:12] == "show options": from .core.SHELLoptions import controlset if string[:7] == "linux86": if string == "linux86/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/chmod": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/reverse_tcp": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/download&exec": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/exec": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:10] == "solarisx86": if string == "solarisx86/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "solarisx86/reverse_tcp": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "solarisx86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:7] == "linux64": if string == "linux64/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux64/mkdir": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux64/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux64/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], 
self.argvlist[1], self.argvlist[0]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:5] == "linux": if string == "linux/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:5] == "osx86": if string == "osx86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "osx86/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[1], self.argvlist[0]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:5] == "osx64": if string == "osx64/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "osx64/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:11] == "freebsd_x86": if string == "freebsd_x86/reverse_tcp2": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/reverse_tcp": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/exec": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:11] == "freebsd_x64": if string == "freebsd_x64/tcp_bind": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3]) elif string == "freebsd_x64/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x64/exec": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:9] == "linux_arm": if string == "linux_arm/chmod": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) elif string == "linux_arm/exec": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) elif string == "linux_arm/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:10] == "linux_mips": if string == "linux_mips/chmod": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) elif string == "linux_mips/reverse_tcp": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3]) elif string == "linux_mips/tcp_bind": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:7] == "windows": if string == "windows/messagebox": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "windows/exec": controlset(string, 
self.argvlist[1], self.argvlist[0], self.argvlist[2]) elif string == "windows/download&execute": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3]) elif string == "windows/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) elif string == "windows/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) self.control(string) elif terminal[:8] == "generate": from .database.generator import generator if string[:7] == "linux86": if string == "linux86/binsh_spawn": self.disassembly = generator("linux86", "binsh_spawn") elif string == "linux86/read": if self.argvlist[2] == "None": print("\nFile name must be declared.\n") self.control(string) self.disassembly = generator("linux86", "read", FILE=self.argvlist[2]) elif string == "linux86/exec": if self.argvlist[2] == "None": print("\nCommand must be declared.\n") self.control(string) self.disassembly = generator("linux86", "exec", COMMAND=self.argvlist[2]) elif string == "linux86/download&exec": if self.argvlist[2] == "None": print("\nLink must be declared.\n") self.control(string) elif "/" not in self.argvlist[2]: print("\nWrong url format example : 127.0.0.1/X\n") self.control(string) elif len(self.argvlist[2].split("/")[-1]) != 1: print("\nYour filename must be one lenght ..\n") self.control(string) if "http" in self.argvlist[2] or "https" in self.argvlist[2] or "www." in self.argvlist: try: edit = self.argvlist[2].replace("http://", "").replace("https://", "").replace("www.", "") self.argvlist[2] = edit except: pass self.disassembly = generator("linux86", "download&exec", URL=self.argvlist[2]) elif string == "linux86/chmod": if self.argvlist[2] == "None": print("\nFile name must be declared.\n") self.control(string) self.disassembly = generator("linux86", "chmod", FILE=self.argvlist[2]) elif string == "linux86/tcp_bind": if self.argvlist[2] == "None": print("\nPORT must be declared.\n") self.control(string) self.disassembly = generator("linux86", "tcp_bind", port=self.argvlist[2]) elif string == "linux86/reverse_tcp": if self.argvlist[2] == "None" or self.argvlist[3] == "None": print("\nHost&Port must be declared.\n") self.control(string) self.disassembly = generator("linux86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string[:7] == "linux64": if string == "linux64/binsh_spawn": self.disassembly = generator("linux64", "binsh_spawn") elif string == "linux64/tcp_bind": self.disassembly = generator("linux64", "tcp_bind", port=self.argvlist[2]) elif string == "linux64/reverse_tcp": self.disassembly = generator("linux64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "linux64/read": self.disassembly = generator("linux64", "read", FILE=self.argvlist[2]) if string[:5] == "linux": if string == "linux/read": if self.argvlist[2] == "None": print("\nFile name must be declared.\n") self.control(string) self.disassembly = generator("linux", "read", FILE=self.argvlist[2]) elif string == "linux/binsh_spawn": self.disassembly = generator("linux", "binsh_spawn") elif string == "linux/tcp_bind": self.disassembly = generator("linux", "tcp_bind", port=self.argvlist[2]) elif string == "linux/reverse_tcp": self.disassembly = generator("linux", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string[:5] == "osx86": if string == "osx86/tcp_bind": self.disassembly = generator("osx86", "tcp_bind", port=self.argvlist[2]) elif string == "osx86/binsh_spawn": self.disassembly = 
generator("osx86", "binsh_spawn") elif string == "osx86/reverse_tcp": self.disassembly = generator("osx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string[:5] == "osx64": if string == "osx64/binsh_spawn": self.disassembly = generator("osx64", "binsh_spawn") elif string == "osx64/tcp_bind": self.disassembly = generator("osx64", "tcp_bind", port=self.argvlist[2]) elif string == "osx64/reverse_tcp": self.disassembly = generator("osx64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string[:11] == "freebsd_x86": if string == "freebsd_x86/binsh_spawn": self.disassembly = generator("freebsdx86", "binsh_spawn") elif string == "freebsd_x86/read": self.disassembly = generator("freebsdx86", "read", FILE=self.argvlist[2]) elif string == "freebsd_x86/reverse_tcp": self.disassembly = generator("freebsdx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "freebsd_x86/reverse_tcp2": self.disassembly = generator("freebsdx86", "reverse_tcp2", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "freebsd_x86/exec": self.disassembly = generator("freebsdx86", "exec", COMMAND=self.argvlist[2]) elif string == "freebsd_x86/tcp_bind": self.disassembly = generator("freebsdx86", "tcp_bind", port=self.argvlist[2]) elif string[:11] == "freebsd_x64": if string == "freebsd_x64/binsh_spawn": self.disassembly = generator("freebsdx64", "binsh_spawn") elif string == "freebsd_x64/tcp_bind": self.disassembly = generator("freebsdx64", "tcp_bind", port=self.argvlist[2], PASSWORD=self.argvlist[3]) elif string == "freebsd_x64/reverse_tcp": self.disassembly = generator("freebsdx64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "freebsd_x64/exec": self.disassembly = generator("freebsdx64", "exec", COMMAND=self.argvlist[2]) elif string[:9] == "linux_arm": if string == "linux_arm/chmod": self.disassembly = generator("linux_arm", "chmod", FILE=self.argvlist[2]) elif string == "linux_arm/binsh_spawn": self.disassembly = generator("linux_arm", "binsh_spawn") elif string == "linux_arm/reverse_tcp": self.disassembly = generator("linux_arm", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "linux_arm/exec": self.disassembly = generator("linux_arm", "exec", COMMAND=self.argvlist[2]) elif string[:10] == "linux_mips": if string == "linux_mips/reverse_tcp": self.disassembly = generator("linux_mips", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "linux_mips/binsh_spawn": self.disassembly = generator("linux_mips", "binsh_spawn") elif string == "linux_mips/chmod": self.disassembly = generator("linux_mips", "chmod", FILE=self.argvlist[2]) elif string == "linux_mips/tcp_bind": self.disassembly = generator("linux_mips", "tcp_bind", port=self.argvlist[2]) elif string[:7] == "windows": if string == "windows/messagebox": self.disassembly = generator("windows", "messagebox", MESSAGE=self.argvlist[2]) elif string == "windows/download&execute": self.disassembly = generator("windows", "downloandandexecute", URL=self.argvlist[2], FILENAME=self.argvlist[3]) elif string == "windows/exec": self.disassembly = generator("windows", "exec", COMMAND=self.argvlist[2]) elif string == "windows/reverse_tcp": self.disassembly = generator("windows", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "windows/tcp_bind": self.disassembly = generator("windows", "tcp_bind", port=self.argvlist[2]) elif string[:10] == "solarisx86": if string == "solarisx86/binsh_spawn": self.disassembly = 
generator("solarisx86", "binsh_spawn") elif string == "solarisx86/read": if self.argvlist[2] == "None": print("\nFile name must be declared.\n") self.control(string) self.disassembly = generator("solarisx86", "read", FILE=self.argvlist[2]) elif string == "solarisx86/reverse_tcp": self.disassembly = generator("solarisx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2]) elif string == "solarisx86/tcp_bind": self.disassembly = generator("solarisx86", "tcp_bind", port=self.argvlist[2]) if self.argvlist[0] == "x86/xor_b3m": from .encoders.shellcode.xor_b3m import prestart if self.argvlist[1] == "None": self.argvlist[1] = 1 elif self.argvlist[1] == 0: self.argvlist[1] = 1 self.disassembly = prestart(self.disassembly.replace("\\x", ""), int(self.argvlist[1])) elif self.argvlist[0] == "x86/xor": from .encoders.shellcode.xor import prestart if self.argvlist[1] == "None": self.argvlist[1] = 1 elif self.argvlist[1] == 0: self.argvlist[1] = 1 self.disassembly = prestart(self.disassembly.replace("\\x", ""), int(self.argvlist[1])) else: self.disassembly = self.disassembly # print "\n"+"Shellcode Lenght : %d" % len(str(bytearray(self.disassembly.replace("\\x", "").decode("hex")))) B3mB4m.prettyout(self.disassembly) self.control(string) elif terminal[:6] == "output": if self.disassembly == "None": print("Please generate shellcode before save it.") self.control(string) # I'm not sure about this option, should I get this option with params # Or directly inputs ? .. if terminal[7:10].lower() == "exe": # Will be add missing parts .. if "linux86" in terminal.lower(): OS = "linux86" elif "linux64" in terminal.lower(): OS = "linux64" elif "windows" in terminal.lower(): OS = "windows" elif "freebsdx86" in terminal.lower(): OS = "freebsdx86" elif "freebsdx64" in terminal.lower(): OS = "freebsdx64" elif "openbsdx86" in terminal.lower(): OS = "openbsdx86" elif "solarisx86" in terminal.lower(): OS = "solarisx86" elif "linuxpowerpc" in terminal.lower(): OS = "linuxpowerpc" elif "openbsdpowerpc" in terminal.lower(): OS = "openbsdpowerpc" elif "linuxsparc" in terminal.lower(): OS = "linuxsparc" elif "freebsdsparc" in terminal.lower(): OS = "freebsdsparc" elif "openbsdsparc" in terminal.lower(): OS = "openbsdsparc" elif "solarissparc" in terminal.lower(): OS = "solarissparc" elif "linuxarm" in terminal.lower(): OS = "linuxarm" elif "freebsdarm" in terminal.lower(): OS = "freebsdarm" elif "openbsdarm" in terminal.lower(): OS = "openbsdarm" else: OS = None from .Outputs.exe import ExeFile ExeFile(self.disassembly, OS) self.control(string) elif terminal[7:10].lower() == "c++" or terminal[7:10].lower() == "cpp": from .Outputs.Cplusplus import CplusplusFile if "windows" in string: CplusplusFile(self.disassembly, True) else: CplusplusFile(self.disassembly) elif terminal[7:8].lower() == "c": if "windows" in string: from .Outputs.Cplusplus import CplusplusFile CplusplusFile(self.disassembly, True) else: from .Outputs.C import CFile CFile(self.disassembly) elif terminal[7:9].lower() == "py" or terminal[7:13].lower() == "python": from .Outputs.python import PyFile PyFile(self.disassembly) elif terminal[7:10].lower() == "txt": from .Outputs.txt import TxtFile TxtFile(self.disassembly) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown output type: {0}".format(terminal) + bcolors.ENDC) self.control(string) elif terminal[:5] == "clear": B3mB4m.clean() self.control(string) elif terminal[:2].lower() == "ip": B3mB4m.IP() self.control(string) elif terminal[:13] == "show encoders": from .core.lists import encoderlist 
encoderlist() self.control(string) elif terminal[:5] == "disas": B3mB4m().startdisas( self.disassembly, string) self.control(string) else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string)
avg_line_length: 50.883641
max_line_length: 140
alphanum_fraction: 0.466683
licenses: [ "MIT" ]
repository_name: An-spectator/shellsploit-framework
path: shell/control.py
size: 44,167
lang: Python
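The control() method above dispatches on hand-sliced prefixes (terminal[:4] == "help", terminal[:3] == "set", ...), which is easy to get subtly wrong as the command set grows. A minimal sketch of the same idea as a dispatch table keyed on the first word; the handlers are illustrative, not shellsploit's actual API (Python 3):

import sys


def dispatch(line, handlers):
    # split "set lhost 1.2.3.4" into "set" and "lhost 1.2.3.4"
    command, _, argument = line.strip().partition(" ")
    handler = handlers.get(command)
    if handler is None:
        print("[-] Unknown command: {0}".format(line))
        return None
    return handler(argument)


handlers = {
    "help": lambda arg: print("commands: " + ", ".join(sorted(handlers))),
    "exit": lambda arg: sys.exit("\nThanks for using shellsploit !\n"),
}

dispatch("help", handlers)  # -> commands: exit, help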
import numpy as np

from .State import State
from .Action import Action

'''
Includes blood glucose level proxy for diabetes: 0-4
    (lo2, lo1, normal, hi1, hi2); any level other than normal is "abnormal"
Initial distribution:
    [.05, .15, .6, .15, .05] for non-diabetics and
    [.01, .05, .15, .6, .19] for diabetics

Effect of turning vasopressors on, if diabetic:
    raise blood pressure: normal -> hi w.p. .9,
        lo -> normal w.p. .5, lo -> hi w.p. .4
    raise blood glucose by 1 w.p. .5

Effect of turning vasopressors off, if diabetic:
    blood pressure falls by 1 w.p. .05 instead of .1
    glucose does not fall - apply fluctuations below instead

Fluctuation in blood glucose levels (IV/insulin therapy are not possible
actions):
    fluctuate w.p. .3 if diabetic
    fluctuate w.p. .1 if non-diabetic
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4530321/

Additional fluctuation regardless of other changes

This order is applied: antibiotics, ventilation, vasopressors, fluctuations
'''


class MDP(object):
    def __init__(self, init_state_idx=None, init_state_idx_type='obs',
                 policy_array=None, policy_idx_type='obs', p_diabetes=0.2):
        '''
        initialize the simulator
        '''
        assert p_diabetes >= 0 and p_diabetes <= 1, \
            "Invalid p_diabetes: {}".format(p_diabetes)
        assert policy_idx_type in ['obs', 'full', 'proj_obs']

        # Check the policy dimensions (states x actions)
        if policy_array is not None:
            assert policy_array.shape[1] == Action.NUM_ACTIONS_TOTAL
            if policy_idx_type == 'obs':
                assert policy_array.shape[0] == State.NUM_OBS_STATES
            elif policy_idx_type == 'full':
                assert policy_array.shape[0] == \
                    State.NUM_HID_STATES * State.NUM_OBS_STATES
            elif policy_idx_type == 'proj_obs':
                assert policy_array.shape[0] == State.NUM_PROJ_OBS_STATES

        # p_diabetes is used to generate random state if init_state is None
        self.p_diabetes = p_diabetes
        self.state = None

        # Only need to use init_state_idx_type if you are providing a state_idx!
        self.state = self.get_new_state(init_state_idx, init_state_idx_type)

        self.policy_array = policy_array
        self.policy_idx_type = policy_idx_type  # Used for mapping the policy to actions

    def get_new_state(self, state_idx=None, idx_type='obs', diabetic_idx=None):
        '''
        use to start MDP over. A few options:

        Full specification:
        1. Provide state_idx with idx_type = 'obs' + diabetic_idx
        2. Provide state_idx with idx_type = 'full', diabetic_idx is ignored
        3. Provide state_idx with idx_type = 'proj_obs' + diabetic_idx*

        * This option will set glucose to a normal level

        Random specification:
        4. State_idx, no diabetic_idx: Latter will be generated
        5. No state_idx, no diabetic_idx: Completely random
        6. No state_idx, diabetic_idx given: Random conditional on diabetes
        '''
        assert idx_type in ['obs', 'full', 'proj_obs']
        option = None
        if state_idx is not None:
            if idx_type == 'obs' and diabetic_idx is not None:
                option = 'spec_obs'
            elif idx_type == 'obs' and diabetic_idx is None:
                option = 'spec_obs_no_diab'
                diabetic_idx = np.random.binomial(1, self.p_diabetes)
            elif idx_type == 'full':
                option = 'spec_full'
            elif idx_type == 'proj_obs' and diabetic_idx is not None:
                option = 'spec_proj_obs'
        elif state_idx is None and diabetic_idx is None:
            option = 'random'
        elif state_idx is None and diabetic_idx is not None:
            option = 'random_cond_diab'

        assert option is not None, "Invalid specification of new state"

        if option in ['random', 'random_cond_diab']:
            init_state = self.generate_random_state(diabetic_idx)
            # Do not start in death or discharge state
            while init_state.check_absorbing_state():
                init_state = self.generate_random_state(diabetic_idx)
        else:
            # Note that diabetic_idx will be ignored if idx_type = 'full'
            init_state = State(
                state_idx=state_idx, idx_type=idx_type,
                diabetic_idx=diabetic_idx)

        return init_state

    def generate_random_state(self, diabetic_idx=None):
        # Note that we will condition on diabetic idx if provided
        if diabetic_idx is None:
            diabetic_idx = np.random.binomial(1, self.p_diabetes)

        # hr and sys_bp w.p. [.25, .5, .25]
        hr_state = np.random.choice(np.arange(3), p=np.array([.25, .5, .25]))
        sysbp_state = np.random.choice(np.arange(3), p=np.array([.25, .5, .25]))
        # percoxyg w.p. [.2, .8]
        percoxyg_state = np.random.choice(np.arange(2), p=np.array([.2, .8]))

        if diabetic_idx == 0:
            glucose_state = np.random.choice(
                np.arange(5), p=np.array([.05, .15, .6, .15, .05]))
        else:
            glucose_state = np.random.choice(
                np.arange(5), p=np.array([.01, .05, .15, .6, .19]))
        antibiotic_state = 0
        vaso_state = 0
        vent_state = 0

        state_categs = [hr_state, sysbp_state, percoxyg_state,
                        glucose_state, antibiotic_state, vaso_state,
                        vent_state]

        return State(state_categs=state_categs, diabetic_idx=diabetic_idx)

    def transition_antibiotics_on(self):
        '''
        antibiotics state on
        heart rate, sys bp: hi -> normal w.p. .5
        '''
        self.state.antibiotic_state = 1
        if self.state.hr_state == 2 and np.random.uniform(0, 1) < 0.5:
            self.state.hr_state = 1
        if self.state.sysbp_state == 2 and np.random.uniform(0, 1) < 0.5:
            self.state.sysbp_state = 1

    def transition_antibiotics_off(self):
        '''
        antibiotics state off
        if antibiotics was on: heart rate, sys bp: normal -> hi w.p. .1
        '''
        if self.state.antibiotic_state == 1:
            if self.state.hr_state == 1 and np.random.uniform(0, 1) < 0.1:
                self.state.hr_state = 2
            if self.state.sysbp_state == 1 and np.random.uniform(0, 1) < 0.1:
                self.state.sysbp_state = 2
            self.state.antibiotic_state = 0

    def transition_vent_on(self):
        '''
        ventilation state on
        percent oxygen: low -> normal w.p. .7
        '''
        self.state.vent_state = 1
        if self.state.percoxyg_state == 0 and np.random.uniform(0, 1) < 0.7:
            self.state.percoxyg_state = 1

    def transition_vent_off(self):
        '''
        ventilation state off
        if ventilation was on: percent oxygen: normal -> lo w.p. .1
        '''
        if self.state.vent_state == 1:
            if self.state.percoxyg_state == 1 and np.random.uniform(0, 1) < 0.1:
                self.state.percoxyg_state = 0
            self.state.vent_state = 0

    def transition_vaso_on(self):
        '''
        vasopressor state on
        for non-diabetic:
            sys bp: low -> normal, normal -> hi w.p. .7
        for diabetic:
            raise blood pressure: normal -> hi w.p. .9,
                lo -> normal w.p. .5, lo -> hi w.p. .4
            raise blood glucose by 1 w.p. .5
        '''
        self.state.vaso_state = 1
        if self.state.diabetic_idx == 0:
            if np.random.uniform(0, 1) < 0.7:
                if self.state.sysbp_state == 0:
                    self.state.sysbp_state = 1
                elif self.state.sysbp_state == 1:
                    self.state.sysbp_state = 2
        else:
            if self.state.sysbp_state == 1:
                if np.random.uniform(0, 1) < 0.9:
                    self.state.sysbp_state = 2
            elif self.state.sysbp_state == 0:
                up_prob = np.random.uniform(0, 1)
                if up_prob < 0.5:
                    self.state.sysbp_state = 1
                elif up_prob < 0.9:
                    self.state.sysbp_state = 2
            if np.random.uniform(0, 1) < 0.5:
                self.state.glucose_state = min(4, self.state.glucose_state + 1)

    def transition_vaso_off(self):
        '''
        vasopressor state off
        if vasopressor was on:
            for non-diabetics, sys bp: normal -> low, hi -> normal w.p. .1
            for diabetics, blood pressure falls by 1 w.p. .05 instead of .1
        '''
        if self.state.vaso_state == 1:
            if self.state.diabetic_idx == 0:
                if np.random.uniform(0, 1) < 0.1:
                    self.state.sysbp_state = max(0, self.state.sysbp_state - 1)
            else:
                if np.random.uniform(0, 1) < 0.05:
                    self.state.sysbp_state = max(0, self.state.sysbp_state - 1)
            self.state.vaso_state = 0

    def transition_fluctuate(self, hr_fluctuate, sysbp_fluctuate,
                             percoxyg_fluctuate, glucose_fluctuate):
        '''
        all (non-treatment) states fluctuate +/- 1 w.p. .1
        exception: glucose fluctuates +/- 1 w.p. .3 if diabetic
        '''
        if hr_fluctuate:
            hr_prob = np.random.uniform(0, 1)
            if hr_prob < 0.1:
                self.state.hr_state = max(0, self.state.hr_state - 1)
            elif hr_prob < 0.2:
                self.state.hr_state = min(2, self.state.hr_state + 1)
        if sysbp_fluctuate:
            sysbp_prob = np.random.uniform(0, 1)
            if sysbp_prob < 0.1:
                self.state.sysbp_state = max(0, self.state.sysbp_state - 1)
            elif sysbp_prob < 0.2:
                self.state.sysbp_state = min(2, self.state.sysbp_state + 1)
        if percoxyg_fluctuate:
            percoxyg_prob = np.random.uniform(0, 1)
            if percoxyg_prob < 0.1:
                self.state.percoxyg_state = max(0, self.state.percoxyg_state - 1)
            elif percoxyg_prob < 0.2:
                self.state.percoxyg_state = min(1, self.state.percoxyg_state + 1)
        if glucose_fluctuate:
            glucose_prob = np.random.uniform(0, 1)
            if self.state.diabetic_idx == 0:
                if glucose_prob < 0.1:
                    self.state.glucose_state = max(0, self.state.glucose_state - 1)
                elif glucose_prob < 0.2:
                    # glucose has five levels (0-4)
                    self.state.glucose_state = min(4, self.state.glucose_state + 1)
            else:
                if glucose_prob < 0.3:
                    self.state.glucose_state = max(0, self.state.glucose_state - 1)
                elif glucose_prob < 0.6:
                    self.state.glucose_state = min(4, self.state.glucose_state + 1)

    def calculateReward(self):
        num_abnormal = self.state.get_num_abnormal()
        if num_abnormal >= 3:
            return -1
        elif num_abnormal == 0 and not self.state.on_treatment():
            return 1
        return 0

    def transition(self, action):
        self.state = self.state.copy_state()

        if action.antibiotic == 1:
            self.transition_antibiotics_on()
            hr_fluctuate = False
            sysbp_fluctuate = False
        elif self.state.antibiotic_state == 1:
            self.transition_antibiotics_off()
            hr_fluctuate = False
            sysbp_fluctuate = False
        else:
            hr_fluctuate = True
            sysbp_fluctuate = True

        if action.ventilation == 1:
            self.transition_vent_on()
            percoxyg_fluctuate = False
        elif self.state.vent_state == 1:
            self.transition_vent_off()
            percoxyg_fluctuate = False
        else:
            percoxyg_fluctuate = True

        glucose_fluctuate = True

        if action.vasopressors == 1:
            self.transition_vaso_on()
            sysbp_fluctuate = False
            glucose_fluctuate = False
        elif self.state.vaso_state == 1:
            self.transition_vaso_off()
            sysbp_fluctuate = False

        self.transition_fluctuate(hr_fluctuate, sysbp_fluctuate,
                                  percoxyg_fluctuate, glucose_fluctuate)

        return self.calculateReward()

    def select_actions(self):
        assert self.policy_array is not None
        probs = self.policy_array[
            self.state.get_state_idx(self.policy_idx_type)
        ]
        aev_idx = np.random.choice(np.arange(Action.NUM_ACTIONS_TOTAL), p=probs)
        return Action(action_idx=aev_idx)
avg_line_length: 39.869427
max_line_length: 89
alphanum_fraction: 0.584072
licenses: [ "MIT" ]
repository_name: GuyLor/gumbel_max_causal_gadgets_part2
path: sepsisSimDiabetes/MDP.py
size: 12,519
lang: Python
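A minimal rollout sketch for the simulator above, grounded in the file's own interface (select_actions samples from policy_array, transition returns the reward); the uniform policy and the import paths, taken from the record's path field, are assumptions:

import numpy as np
from sepsisSimDiabetes.MDP import MDP
from sepsisSimDiabetes.State import State
from sepsisSimDiabetes.Action import Action

n_actions = Action.NUM_ACTIONS_TOTAL
uniform_policy = np.full((State.NUM_OBS_STATES, n_actions), 1.0 / n_actions)

mdp = MDP(policy_array=uniform_policy, policy_idx_type='obs', p_diabetes=0.2)
for _ in range(20):
    action = mdp.select_actions()    # sample an action from the policy row
    reward = mdp.transition(action)  # apply treatments, then fluctuations
    if reward != 0:                  # -1 on >= 3 abnormal vitals, +1 on recovery
        break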
import random

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error as mse
from sklearn import datasets
import unittest

import torch
from fastai.basic_train import Learner
from fastai.callbacks import OneCycleScheduler
from fastai.basic_data import DatasetType

from dies.data import (
    ds_from_df_from_dtypes,
    scale_datasets,
    create_databunch,
    ds_from_df,
)
from dies import data
from dies.mlp import MultiLayerPeceptron
from dies.embedding import Embedding
from dies.utils_pytorch import dev_to_np, xavier_init_uniform
from dies.autoencoder import Autoencoder

random_state = 0


def set_random_states():
    torch.manual_seed(random_state)
    np.random.seed(random_state)
    random.seed(random_state)


def get_df():
    X, y, _ = datasets.make_regression(
        n_samples=50,
        n_features=2,
        bias=1000,
        n_informative=2,
        noise=10,
        coef=True,
        random_state=42,
    )

    df1 = pd.DataFrame(
        data=np.concatenate([X, y.reshape(-1, 1)], axis=1),
        columns=["feat1", "feat2", "target"],
    )
    cats = np.random.randint(low=0, high=10, size=(df1.shape[0], 2))
    df1["cat_1"] = cats[:, 0]
    df1["cat_2"] = cats[:, 1]

    index1 = pd.date_range("2000-01-01", "2000-06-01", periods=df1.shape[0])
    index1 = pd.to_datetime(index1, utc=True)
    df1.index = index1

    return df1


class TestMLP(unittest.TestCase):
    def setUp(self):
        n_features = 3
        device = "cpu"
        df = get_df()

        ds = ds_from_df_from_dtypes(df, "target")
        self.ds_tr, self.ds_val, _ = data.train_test_split_dataset(ds)
        self.db = create_databunch(
            self.ds_tr, self.ds_val, None, batch_size=40, device="cpu"
        )
        set_random_states()

    def test_simple_mlp(self):
        input_size = self.ds_tr.x.shape[1]
        df_tr = self.ds_tr.to_df()

        ann_model = MultiLayerPeceptron(
            input_size, ann_structure=[2, 1], embedding_module=None, dropout=0.1
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_init = mse(df_tr.target, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr.target, y_hat)

        self.assertLess(e_end, e_init)

    def test_mlp_with_yrange(self):
        input_size = self.ds_tr.x.shape[1]
        df_tr = self.ds_tr.to_df()
        y_ranges = self.ds_tr.y_ranges

        ann_model = MultiLayerPeceptron(
            input_size,
            ann_structure=[2, 1],
            embedding_module=None,
            dropout=0.1,
            y_ranges=y_ranges,
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_init = mse(df_tr.target, y_hat)

        learn.fit(1, lr=0.1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr.target, y_hat)

        self.assertLess(e_end, e_init)

    def test_simple_mlp_with_embedding(self):
        input_size = self.ds_tr.x.shape[1]
        df_tr = self.ds_tr.to_df()

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)

        ann_model = MultiLayerPeceptron(
            input_size,
            ann_structure=[2, 1],
            embedding_module=embedding_module,
            dropout=0.1,
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_init = mse(df_tr.target, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr.target, y_hat)

        self.assertLess(e_end, e_init)

    def test_true(self):
        self.assertTrue(True)


class TestAE(unittest.TestCase):
    def setUp(self):
        n_features = 3
        device = "cpu"
        self.df = get_df()
        self.df.drop("target", axis=1, inplace=True)
        set_random_states()

    def test_simple_ae(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(self.df, y_columns=cols, x_columns=cols)
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        ann_model = Autoencoder(input_size=input_size, ann_structure=ann_structure)
        ann_model.apply(xavier_init_uniform)

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_ae_with_yranges(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(self.df, y_columns=cols, x_columns=cols)
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]
        y_ranges = ds_tr.y_ranges

        ann_model = Autoencoder(
            input_size=input_size, ann_structure=ann_structure, y_ranges=y_ranges
        )
        ann_model.apply(xavier_init_uniform)
        set_random_states()

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_ae_with_embedding_and_yrange(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(
            self.df, y_columns=cols, x_columns=cols, cat_columns=["cat_1", "cat_2"]
        )
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()
        y_ranges = ds_tr.y_ranges

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)

        ann_model = Autoencoder(
            input_size=input_size,
            ann_structure=ann_structure,
            embedding_module=embedding_module,
            embeding_position="start",
            y_ranges=y_ranges,
        )
        set_random_states()
        ann_model.apply(xavier_init_uniform)
        set_random_states()

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        # adds some small tolerance
        self.assertLess(
            e_end,
            e_init + 0.05,
        )

    def test_ae_with_embedding_at_start(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(
            self.df, y_columns=cols, x_columns=cols, cat_columns=["cat_1", "cat_2"]
        )
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)

        ann_model = Autoencoder(
            input_size=input_size,
            ann_structure=ann_structure,
            embedding_module=embedding_module,
            embeding_position="start",
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_ae_with_embedding_at_bottleneck(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(
            self.df, y_columns=cols, x_columns=cols, cat_columns=["cat_1", "cat_2"]
        )
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)

        ann_model = Autoencoder(
            input_size=input_size,
            ann_structure=ann_structure,
            embedding_module=embedding_module,
            embeding_position="bottleneck",
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1, lr=0.1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_true(self):
        self.assertTrue(True)
32.228125
83
0.632406
[ "MIT" ]
scribbler00/mtl-sps_ern-and-hps_taskembbedding
dies/dies/tests/test_regression.py
10,313
Python
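Every test in the file above follows the same assertion pattern: record the training-set MSE before fitting, train briefly, and assert the error dropped. A minimal sketch of that pattern in plain PyTorch (no fastai or dies dependencies; the toy model and data are illustrative):

import torch
from sklearn.metrics import mean_squared_error as mse

torch.manual_seed(0)
x = torch.randn(50, 2)
y = x @ torch.tensor([[1.5], [-2.0]]) + 0.1 * torch.randn(50, 1)

model = torch.nn.Linear(2, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.MSELoss()

e_init = mse(y.numpy(), model(x).detach().numpy())
for _ in range(10):  # a short "fit"
    opt.zero_grad()
    loss_fn(model(x), y).backward()
    opt.step()
e_end = mse(y.numpy(), model(x).detach().numpy())

assert e_end < e_init  # the same check as self.assertLess(e_end, e_init)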
import argparse

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from net.tf_net import \
    calculate_accuracy, calculate_loss, \
    create_simple_cnn_model, optimize_weights
from net.keras_net import simple_cnn

# MNIST images are 28x28 pixels with a single gray-value channel
image_height = 28
image_width = 28


def train_keras(batch_size, epochs, n_classes):
    # x_train returns data with shape (60,000, 28, 28)
    # y_train returns data with shape (60,000,)
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    # add one dimension for the color channel (only gray values)
    x_train = x_train.reshape(x_train.shape[0], image_height, image_width, 1)
    x_test = x_test.reshape(x_test.shape[0], image_height, image_width, 1)

    # define input shape of the image
    input_shape = (image_height, image_width, 1)

    # convert tensors to float
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')

    # normalize data: divide by 255 (max color value) to receive values between 0 and 1
    x_train /= 255
    x_test /= 255

    # one-hot encoding: converts into an array of length 'n_classes' and sets one where true,
    # e.g. label = 5 -> y_train[5] = 1, rest is 0
    y_train = tf.keras.utils.to_categorical(y_train, n_classes)
    y_test = tf.keras.utils.to_categorical(y_test, n_classes)

    simple_cnn_model = simple_cnn(input_shape)
    simple_cnn_model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                         validation_data=(x_test, y_test))

    train_loss, train_accuracy = simple_cnn_model.evaluate(
        x_train, y_train, verbose=0)
    print('Train data loss:', train_loss)
    print('Train data accuracy:', train_accuracy)

    test_loss, test_accuracy = simple_cnn_model.evaluate(
        x_test, y_test, verbose=0)
    print('Test data loss:', test_loss)
    print('Test data accuracy:', test_accuracy)


def train_tensorflow(batch_size, epochs, n_classes):
    mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
    test_images, test_labels = mnist_data.test.images, mnist_data.test.labels
    input_size = 784

    # declare placeholders
    x_input = tf.placeholder(tf.float32, shape=[None, input_size])
    y_input = tf.placeholder(tf.float32, shape=[None, n_classes])
    # when testing, dropout is switched off via this placeholder
    bool_dropout = tf.placeholder(tf.bool)

    # create neural net and receive logits
    logits = create_simple_cnn_model(x_input, y_input, bool_dropout)

    # calculate loss, optimize weights and calculate accuracy
    loss_operation = calculate_loss(logits, y_input)
    optimizer = optimize_weights(loss_operation)
    accuracy_operation = calculate_accuracy(logits, y_input)

    # start training
    session = tf.Session()
    session.run(tf.global_variables_initializer())

    # merge all summaries for tensorboard
    merged_summary_operation = tf.summary.merge_all()
    train_summary_writer = tf.summary.FileWriter('/tmp/train', session.graph)
    test_summary_writer = tf.summary.FileWriter('/tmp/test')

    for batch_n in range(epochs):
        mnist_batch = mnist_data.train.next_batch(batch_size)
        train_images, train_labels = mnist_batch[0], mnist_batch[1]
        _, merged_summary = session.run([optimizer, merged_summary_operation],
                                        feed_dict={x_input: train_images,
                                                   y_input: train_labels,
                                                   bool_dropout: True})
        train_summary_writer.add_summary(merged_summary, batch_n)

        if batch_n % 10 == 0:
            merged_summary, _ = session.run([merged_summary_operation, accuracy_operation],
                                            feed_dict={x_input: test_images,
                                                       y_input: test_labels,
                                                       bool_dropout: False})
            test_summary_writer.add_summary(merged_summary, batch_n)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Train a simple neural net to recognize number images '
                    'from the MNIST dataset and apply the correct labels')
    parser.add_argument('--epochs', default=200, type=int,
                        help='Number of batches the net trains on')
    parser.add_argument('--batch_size', default=100, type=int,
                        help='Number of training samples inside one batch')
    parser.add_argument('--n_classes', default=10, type=int,
                        help='Number of target classes (the ten digits)')
    parser.add_argument('--tf', default=True,
                        type=lambda v: str(v).lower() not in ('false', '0', 'no'),
                        help='Tensorflow (True) or Keras (False) implementation')
    args = parser.parse_args()

    if args.tf:
        train_tensorflow(args.batch_size, args.epochs, args.n_classes)
    else:
        train_keras(args.batch_size, args.epochs, args.n_classes)
38.355932
126
0.682943
[ "MIT" ]
na018/DeepLearning
00_MNIST-label/train.py
4,526
Python
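The `--tf` flag above illustrates a common argparse pitfall: values arrive as strings, and `bool('False')` is `True`, so a naive boolean option can never be switched off from the command line. A self-contained sketch of the explicit string-to-bool parsing used in the script (the helper name is illustrative):

import argparse

def str2bool(value):
    # argparse hands over the raw command-line string; bool('False') would
    # be True, so parse the string explicitly instead.
    return str(value).lower() not in ('false', '0', 'no')

parser = argparse.ArgumentParser()
parser.add_argument('--tf', default=True, type=str2bool)

print(parser.parse_args(['--tf', 'False']).tf)  # False, as intended
print(parser.parse_args([]).tf)                 # True (the default)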
#!/usr/bin/env python3 from matplotlib.patches import Circle, Rectangle, ConnectionPatch import matplotlib.pyplot as plt import numpy as np from matplotlib import animation from math import floor Colors = ['green', 'purple', 'orange', 'red', 'blue', 'yellow'] class Animation: def __init__(self, my_map, starts, goals, paths, predictions): self.my_map = np.flip(np.transpose(my_map), 1) self.predictions = predictions self.starts = [] for start in starts: self.starts.append((start[1], len(self.my_map[0]) - 1 - start[0])) self.goals = [] for goal in goals: self.goals.append((goal[1], len(self.my_map[0]) - 1 - goal[0])) self.paths = [] if paths: for path in paths: self.paths.append([]) for loc in path: self.paths[-1].append((loc[1], len(self.my_map[0]) - 1 - loc[0])) aspect = len(self.my_map) / len(self.my_map[0]) self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4)) self.ax = self.fig.add_subplot(111, aspect='equal') self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=None, hspace=None) # self.ax.set_frame_on(False) self.patches = [] self.artists = [] self.agents = dict() self.agent_names = dict() self.goal_predictions = dict() self.agent_goal_connections = dict() # create boundary patch x_min = -0.5 y_min = -0.5 x_max = len(self.my_map) - 0.5 y_max = len(self.my_map[0]) - 0.5 plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(np.arange(x_min, x_max, 1)) plt.yticks(np.arange(y_min, y_max, 1)) plt.grid(color='0.85') self.patches.append(Rectangle((x_min, y_min), x_max - x_min, y_max - y_min, facecolor='none', edgecolor='gray')) for i in range(len(self.my_map)): for j in range(len(self.my_map[0])): if self.my_map[i][j]: self.patches.append(Rectangle((i - 0.5, j - 0.5), 1, 1, facecolor='gray', edgecolor='gray')) self.T = 0 # draw goals for i, goal in enumerate(self.goals): goal_color = Colors[i % len(Colors)] self.patches.append(Rectangle((goal[0] - 0.25, goal[1] - 0.25), 0.5, 0.5, facecolor=goal_color, edgecolor='black', alpha=0.5)) # create agents for a in range(len(self.paths)): name = str(a) self.agents[a] = Circle((starts[a][0], starts[a][1]), 0.3, facecolor=Colors[a % len(Colors)], edgecolor='black') self.agents[a].original_face_color = Colors[a % len(Colors)] self.patches.append(self.agents[a]) self.T = max(self.T, len(paths[a]) - 1) self.agent_names[a] = self.ax.text(starts[a][0], starts[a][1] + 0.25, name) self.agent_names[a].set_horizontalalignment('center') self.agent_names[a].set_verticalalignment('center') self.artists.append(self.agent_names[a]) # connections & predictions self.goal_predictions[a] = dict() self.agent_goal_connections[a] = dict() for i, goal in enumerate(self.goals): goal_color = Colors[i % len(Colors)] self.goal_predictions[a][i] = self.ax.text(goal[0], goal[1], str(i)) self.goal_predictions[a][i].set_horizontalalignment('center') self.goal_predictions[a][i].set_verticalalignment('center') self.artists.append(self.goal_predictions[a][i]) self.agent_goal_connections[a][i] = plt.Line2D((start[1], goal[0]), (len(self.my_map[0]) - 1 - start[0], goal[1]), lw=2.5, color = goal_color) self.artists.append(self.agent_goal_connections[a][i]) self.animation = animation.FuncAnimation(self.fig, self.animate_func, init_func=self.init_func, frames=int(self.T + 1) * 10, interval=100, blit=True) def save(self, file_name, speed): self.animation.save( file_name, fps=10 * speed, dpi=200, savefig_kwargs={"pad_inches": 0}) @staticmethod def show(): plt.show() def init_func(self): for p in self.patches: self.ax.add_patch(p) for a in self.artists: 
            self.ax.add_artist(a)

        return self.patches + self.artists

    def animate_func(self, t):
        # for every agent
        for a in range(len(self.paths)):
            pos = self.get_state(t / 10, self.paths[a])
            self.agents[a].center = (pos[0], pos[1])
            self.agent_names[a].set_position((pos[0], pos[1] + 0.5))

            # for every goal
            for i in self.agent_goal_connections[a]:
                timestep = floor(t / 10)
                if timestep not in self.predictions[a]:
                    continue
                prediction = self.predictions[a][timestep][i]

                # connection lines
                self.agent_goal_connections[a][i].set_data([pos[0], self.goals[i][0]],
                                                           [pos[1], self.goals[i][1]])
                self.agent_goal_connections[a][i].set_alpha(prediction)

                # percentages
                self.goal_predictions[a][i].set_text("{:.2f}".format(prediction * 100))
                self.goal_predictions[a][i].set_position([(pos[0] + self.goals[i][0]) / 2,
                                                          (pos[1] + self.goals[i][1]) / 2])
                self.goal_predictions[a][i].set_alpha(prediction)

        # reset all colors
        for _, agent in self.agents.items():
            agent.set_facecolor(agent.original_face_color)

        # check drive-drive collisions
        agents_array = [agent for _, agent in self.agents.items()]
        for i in range(0, len(agents_array)):
            for j in range(i + 1, len(agents_array)):
                d1 = agents_array[i]
                d2 = agents_array[j]
                pos1 = np.array(d1.center)
                pos2 = np.array(d2.center)
                if np.linalg.norm(pos1 - pos2) < 0.7:
                    d1.set_facecolor('red')
                    d2.set_facecolor('red')
                    print("COLLISION! (agent-agent) ({}, {}) at time {}".format(i, j, t / 10))

        return self.patches + self.artists

    @staticmethod
    def get_state(t, path):
        if int(t) <= 0:
            return np.array(path[0])
        elif int(t) >= len(path):
            return np.array(path[-1])
        else:
            pos_last = np.array(path[int(t) - 1])
            pos_next = np.array(path[int(t)])
            pos = (pos_next - pos_last) * (t - int(t)) + pos_last
            return pos
41.706587
158
0.535822
[ "CC0-1.0" ]
mirtorande/grpf-tool
visualize.py
6,965
Python
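The animation's frame positions come from `get_state`, which linearly interpolates between consecutive path waypoints at fractional timesteps. A standalone sketch of that interpolation:

import numpy as np

def get_state(t, path):
    # Linear interpolation between consecutive waypoints, as in
    # Animation.get_state above; t is a fractional timestep.
    if int(t) <= 0:
        return np.array(path[0])
    if int(t) >= len(path):
        return np.array(path[-1])
    pos_last = np.array(path[int(t) - 1])
    pos_next = np.array(path[int(t)])
    return (pos_next - pos_last) * (t - int(t)) + pos_last

path = [(0, 0), (1, 0), (1, 1)]
print(get_state(1.5, path))  # halfway between (0, 0) and (1, 0) -> [0.5 0. ]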
from sense_hat import SenseHat
import time

sense = SenseHat()

while True:
    # get_accelerometer_raw() returns a dict with 'x', 'y' and 'z' in g;
    # index by key instead of unpacking .values(), which depends on dict order
    acceleration = sense.get_accelerometer_raw()
    x = round(acceleration['x'] * 100, 0)
    y = round(acceleration['y'] * 100, 0)
    z = round(acceleration['z'] * 100, 0)

    print("x=%s, y=%s, z=%s" % (x, y, z))
    time.sleep(2)
17.6
52
0.57197
[ "CC0-1.0" ]
kajackdfw/python_rpi_sense_hat_demos
acceleration.py
264
Python
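At rest the accelerometer reads roughly 1 g in total (gravity), so a common extension of this loop is to flag movement whenever the magnitude strays from 1. A sketch using raw g values, i.e. before the ×100 scaling above (the threshold is an arbitrary assumption):

from math import sqrt

def is_moving(x, y, z, tolerance=0.1):
    # At rest the accelerometer reads ~1 g total (gravity), so a
    # magnitude far from 1 suggests the board is being moved.
    magnitude = sqrt(x * x + y * y + z * z)
    return abs(magnitude - 1.0) > tolerance

print(is_moving(0.0, 0.0, 1.02))  # False: essentially just gravity
print(is_moving(0.3, 0.4, 1.2))   # True: magnitude is ~1.3 g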
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch import torch.nn.functional as F from torch import nn from fcos_core.structures.bounding_box import BoxList from fcos_core.structures.boxlist_ops import boxlist_nms from fcos_core.structures.boxlist_ops import cat_boxlist from fcos_core.modeling.box_coder import BoxCoder class PostProcessor(nn.Module): """ From a set of classification scores, box regression and proposals, computes the post-processed boxes, and applies NMS to obtain the final results """ def __init__( self, score_thresh=0.05, nms=0.5, detections_per_img=100, box_coder=None, cls_agnostic_bbox_reg=False, bbox_aug_enabled=False ): """ Arguments: score_thresh (float) nms (float) detections_per_img (int) box_coder (BoxCoder) """ super(PostProcessor, self).__init__() self.score_thresh = score_thresh self.nms = nms self.detections_per_img = detections_per_img if box_coder is None: box_coder = BoxCoder(weights=(10., 10., 5., 5.)) self.box_coder = box_coder self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg self.bbox_aug_enabled = bbox_aug_enabled def forward(self, x, boxes): """ Arguments: x (tuple[tensor, tensor]): x contains the class logits and the box_regression from the model. boxes (list[BoxList]): bounding boxes that are used as reference, one for ech image Returns: results (list[BoxList]): one BoxList for each image, containing the extra fields labels and scores """ class_logits, box_regression = x class_prob = F.softmax(class_logits, -1) # TODO think about a representation of batch of boxes image_shapes = [box.size for box in boxes] boxes_per_image = [len(box) for box in boxes] concat_boxes = torch.cat([a.bbox for a in boxes], dim=0) if self.cls_agnostic_bbox_reg: box_regression = box_regression[:, -4:] proposals = self.box_coder.decode( box_regression.view(sum(boxes_per_image), -1), concat_boxes ) if self.cls_agnostic_bbox_reg: proposals = proposals.repeat(1, class_prob.shape[1]) num_classes = class_prob.shape[1] proposals = proposals.split(boxes_per_image, dim=0) class_prob = class_prob.split(boxes_per_image, dim=0) results = [] for prob, boxes_per_img, image_shape in zip( class_prob, proposals, image_shapes ): boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape) boxlist = boxlist.clip_to_image(remove_empty=False) if not self.bbox_aug_enabled: # If bbox aug is enabled, we will do it later boxlist = self.filter_results(boxlist, num_classes) results.append(boxlist) return results def prepare_boxlist(self, boxes, scores, image_shape): """ Returns BoxList from `boxes` and adds probability scores information as an extra field `boxes` has shape (#detections, 4 * #classes), where each row represents a list of predicted bounding boxes for each of the object classes in the dataset (including the background class). The detections in each row originate from the same object proposal. `scores` has shape (#detection, #classes), where each row represents a list of object detection confidence scores for each of the object classes in the dataset (including the background class). `scores[i, j]`` corresponds to the box at `boxes[i, j * 4:(j + 1) * 4]`. """ boxes = boxes.reshape(-1, 4) scores = scores.reshape(-1) boxlist = BoxList(boxes, image_shape, mode="xyxy") boxlist.add_field("scores", scores) return boxlist def filter_results(self, boxlist, num_classes): """Returns bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). """ # unwrap the boxlist to avoid additional overhead. 
# if we had multi-class NMS, we could perform this directly on the boxlist boxes = boxlist.bbox.reshape(-1, num_classes * 4) scores = boxlist.get_field("scores").reshape(-1, num_classes) device = scores.device result = [] # Apply threshold on detection probabilities and apply NMS # Skip j = 0, because it's the background class inds_all = scores > self.score_thresh for j in range(1, num_classes): inds = inds_all[:, j].nonzero().squeeze(1) scores_j = scores[inds, j] boxes_j = boxes[inds, j * 4 : (j + 1) * 4] boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy") boxlist_for_class.add_field("scores", scores_j) boxlist_for_class = boxlist_nms( boxlist_for_class, self.nms ) num_labels = len(boxlist_for_class) boxlist_for_class.add_field( "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device) ) result.append(boxlist_for_class) result = cat_boxlist(result) number_of_detections = len(result) # Limit to max_per_image detections **over all classes** if number_of_detections > self.detections_per_img > 0: cls_scores = result.get_field("scores") image_thresh, _ = torch.kthvalue( cls_scores.cpu(), number_of_detections - self.detections_per_img + 1 ) keep = cls_scores >= image_thresh.item() keep = torch.nonzero(keep).squeeze(1) result = result[keep] return result def make_roi_box_post_processor(cfg): use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS box_coder = BoxCoder(weights=bbox_reg_weights) score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH nms_thresh = cfg.MODEL.ROI_HEADS.NMS detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED postprocessor = PostProcessor( score_thresh, nms_thresh, detections_per_img, box_coder, cls_agnostic_bbox_reg, bbox_aug_enabled ) return postprocessor
39.479769
89
0.623865
[ "BSD-2-Clause" ]
qilei123/FCOS
fcos_core/modeling/roi_heads/box_head/inference.py
6,830
Python
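`filter_results` caps detections per image via `torch.kthvalue`: keeping the top k scores is the same as thresholding at the (n - k + 1)-th smallest score. A standalone sketch of that trick:

import torch

scores = torch.tensor([0.9, 0.2, 0.75, 0.4, 0.6])
detections_per_img = 3
n = scores.numel()

# The (n - k + 1)-th smallest value is the k-th largest, so thresholding
# with >= keeps exactly the top-k scores (ties aside).
image_thresh, _ = torch.kthvalue(scores, n - detections_per_img + 1)
keep = torch.nonzero(scores >= image_thresh.item()).squeeze(1)
print(keep)  # indices of the 3 highest scores: tensor([0, 2, 4])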
from __future__ import annotations import datetime from functools import partial from textwrap import dedent from typing import TYPE_CHECKING import warnings import numpy as np from pandas._libs.tslibs import Timedelta import pandas._libs.window.aggregations as window_aggregations from pandas._typing import ( Axis, TimedeltaConvertibleTypes, ) if TYPE_CHECKING: from pandas import DataFrame, Series from pandas.core.generic import NDFrame from pandas.compat.numpy import function as nv from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.core.dtypes.missing import isna import pandas.core.common as common # noqa: PDF018 from pandas.core.indexers.objects import ( BaseIndexer, ExponentialMovingWindowIndexer, GroupbyIndexer, ) from pandas.core.util.numba_ import maybe_use_numba from pandas.core.window.common import zsqrt from pandas.core.window.doc import ( _shared_docs, args_compat, create_section_header, kwargs_compat, numba_notes, template_header, template_returns, template_see_also, window_agg_numba_parameters, ) from pandas.core.window.numba_ import ( generate_ewma_numba_table_func, generate_numba_ewma_func, ) from pandas.core.window.online import ( EWMMeanState, generate_online_numba_ewma_func, ) from pandas.core.window.rolling import ( BaseWindow, BaseWindowGroupby, ) def get_center_of_mass( comass: float | None, span: float | None, halflife: float | None, alpha: float | None, ) -> float: valid_count = common.count_not_none(comass, span, halflife, alpha) if valid_count > 1: raise ValueError("comass, span, halflife, and alpha are mutually exclusive") # Convert to center of mass; domain checks ensure 0 < alpha <= 1 if comass is not None: if comass < 0: raise ValueError("comass must satisfy: comass >= 0") elif span is not None: if span < 1: raise ValueError("span must satisfy: span >= 1") comass = (span - 1) / 2 elif halflife is not None: if halflife <= 0: raise ValueError("halflife must satisfy: halflife > 0") decay = 1 - np.exp(np.log(0.5) / halflife) comass = 1 / decay - 1 elif alpha is not None: if alpha <= 0 or alpha > 1: raise ValueError("alpha must satisfy: 0 < alpha <= 1") comass = (1 - alpha) / alpha else: raise ValueError("Must pass one of comass, span, halflife, or alpha") return float(comass) def _calculate_deltas( times: str | np.ndarray | NDFrame | None, halflife: float | TimedeltaConvertibleTypes | None, ) -> np.ndarray: """ Return the diff of the times divided by the half-life. These values are used in the calculation of the ewm mean. Parameters ---------- times : str, np.ndarray, Series, default None Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. halflife : float, str, timedelta, optional Half-life specifying the decay Returns ------- np.ndarray Diff of the times divided by the half-life """ # error: Item "str" of "Union[str, ndarray, NDFrameT, None]" has no # attribute "view" # error: Item "None" of "Union[str, ndarray, NDFrameT, None]" has no # attribute "view" _times = np.asarray( times.view(np.int64), dtype=np.float64 # type: ignore[union-attr] ) _halflife = float(Timedelta(halflife).value) return np.diff(_times) / _halflife class ExponentialMovingWindow(BaseWindow): r""" Provide exponential weighted (EW) functions. Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``. Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be provided. 
Parameters ---------- com : float, optional Specify decay in terms of center of mass, :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. span : float, optional Specify decay in terms of span, :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. halflife : float, str, timedelta, optional Specify decay in terms of half-life, :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for :math:`halflife > 0`. If ``times`` is specified, the time unit (str or timedelta) over which an observation decays to half its value. Only applicable to ``mean()`` and halflife value will not apply to the other functions. .. versionadded:: 1.1.0 alpha : float, optional Specify smoothing factor :math:`\alpha` directly, :math:`0 < \alpha \leq 1`. min_periods : int, default 0 Minimum number of observations in window required to have a value (otherwise result is NA). adjust : bool, default True Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings (viewing EWMA as a moving average). - When ``adjust=True`` (default), the EW function is calculated using weights :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series [:math:`x_0, x_1, ..., x_t`] would be: .. math:: y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 - \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t} - When ``adjust=False``, the exponentially weighted function is calculated recursively: .. math:: \begin{split} y_0 &= x_0\\ y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, \end{split} ignore_na : bool, default False Ignore missing values when calculating weights; specify ``True`` to reproduce pre-0.15.0 behavior. - When ``ignore_na=False`` (default), weights are based on absolute positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``. - When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based on relative positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``. axis : {0, 1}, default 0 The axis to use. The value 0 identifies the rows, and 1 identifies the columns. times : str, np.ndarray, Series, default None .. versionadded:: 1.1.0 Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. If str, the name of the column in the DataFrame representing the times. If 1-D array like, a sequence with the same shape as the observations. Only applicable to ``mean()``. method : str {'single', 'table'}, default 'single' Execute the rolling operation per single column or row (``'single'``) or over the entire object (``'table'``). This argument is only implemented when specifying ``engine='numba'`` in the method call. Only applicable to ``mean()`` .. versionadded:: 1.4.0 Returns ------- DataFrame A Window sub-classed for the particular operation. See Also -------- rolling : Provides rolling window calculations. expanding : Provides expanding transformations. Notes ----- More details can be found at: :ref:`Exponentially weighted windows <window.exponentially_weighted>`. 
Examples -------- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) >>> df B 0 0.0 1 1.0 2 2.0 3 NaN 4 4.0 >>> df.ewm(com=0.5).mean() B 0 0.000000 1 0.750000 2 1.615385 3 1.615385 4 3.670213 Specifying ``times`` with a timedelta ``halflife`` when computing mean. >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17'] >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean() B 0 0.000000 1 0.585786 2 1.523889 3 1.523889 4 3.233686 """ _attributes = [ "com", "span", "halflife", "alpha", "min_periods", "adjust", "ignore_na", "axis", "times", "method", ] def __init__( self, obj: NDFrame, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool = True, ignore_na: bool = False, axis: Axis = 0, times: str | np.ndarray | NDFrame | None = None, method: str = "single", *, selection=None, ): super().__init__( obj=obj, min_periods=1 if min_periods is None else max(int(min_periods), 1), on=None, center=False, closed=None, method=method, axis=axis, selection=selection, ) self.com = com self.span = span self.halflife = halflife self.alpha = alpha self.adjust = adjust self.ignore_na = ignore_na self.times = times if self.times is not None: if not self.adjust: raise NotImplementedError("times is not supported with adjust=False.") if isinstance(self.times, str): warnings.warn( ( "Specifying times as a string column label is deprecated " "and will be removed in a future version. Pass the column " "into times instead." ), FutureWarning, stacklevel=find_stack_level(), ) self.times = self._selected_obj[self.times] if not is_datetime64_ns_dtype(self.times): raise ValueError("times must be datetime64[ns] dtype.") # error: Argument 1 to "len" has incompatible type "Union[str, ndarray, # NDFrameT, None]"; expected "Sized" if len(self.times) != len(obj): # type: ignore[arg-type] raise ValueError("times must be the same length as the object.") if not isinstance(self.halflife, (str, datetime.timedelta)): raise ValueError( "halflife must be a string or datetime.timedelta object" ) if isna(self.times).any(): raise ValueError("Cannot convert NaT values to integer") self._deltas = _calculate_deltas(self.times, self.halflife) # Halflife is no longer applicable when calculating COM # But allow COM to still be calculated if the user passes other decay args if common.count_not_none(self.com, self.span, self.alpha) > 0: self._com = get_center_of_mass(self.com, self.span, None, self.alpha) else: self._com = 1.0 else: if self.halflife is not None and isinstance( self.halflife, (str, datetime.timedelta) ): raise ValueError( "halflife can only be a timedelta convertible argument if " "times is not None." ) # Without times, points are equally spaced self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64) self._com = get_center_of_mass( # error: Argument 3 to "get_center_of_mass" has incompatible type # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]"; # expected "Optional[float]" self.com, self.span, self.halflife, # type: ignore[arg-type] self.alpha, ) def _get_window_indexer(self) -> BaseIndexer: """ Return an indexer class that will compute the window start and end bounds """ return ExponentialMovingWindowIndexer() def online(self, engine="numba", engine_kwargs=None): """ Return an ``OnlineExponentialMovingWindow`` object to calculate exponentially moving window aggregations in an online method. .. 
versionadded:: 1.3.0 Parameters ---------- engine: str, default ``'numba'`` Execution engine to calculate online aggregations. Applies to all supported aggregation methods. engine_kwargs : dict, default None Applies to all supported aggregation methods. * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be applied to the function Returns ------- OnlineExponentialMovingWindow """ return OnlineExponentialMovingWindow( obj=self.obj, com=self.com, span=self.span, halflife=self.halflife, alpha=self.alpha, min_periods=self.min_periods, adjust=self.adjust, ignore_na=self.ignore_na, axis=self.axis, times=self.times, engine=engine, engine_kwargs=engine_kwargs, selection=self._selection, ) @doc( _shared_docs["aggregate"], see_also=dedent( """ See Also -------- pandas.DataFrame.rolling.aggregate """ ), examples=dedent( """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) >>> df A B C 0 1 4 7 1 2 5 8 2 3 6 9 >>> df.ewm(alpha=0.5).mean() A B C 0 1.000000 4.000000 7.000000 1 1.666667 4.666667 7.666667 2 2.428571 5.428571 8.428571 """ ), klass="Series/Dataframe", axis="", ) def aggregate(self, func, *args, **kwargs): return super().aggregate(func, *args, **kwargs) agg = aggregate @doc( template_header, create_section_header("Parameters"), args_compat, window_agg_numba_parameters, kwargs_compat, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also, create_section_header("Notes"), numba_notes.replace("\n", "", 1), window_method="ewm", aggregation_description="(exponential weighted moment) mean", agg_method="mean", ) def mean(self, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): if self.method == "single": ewma_func = generate_numba_ewma_func( engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas ) numba_cache_key = (lambda x: x, "ewma") else: ewma_func = generate_ewma_numba_table_func( engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas ) numba_cache_key = (lambda x: x, "ewma_table") return self._apply( ewma_func, numba_cache_key=numba_cache_key, ) elif engine in ("cython", None): if engine_kwargs is not None: raise ValueError("cython engine does not accept engine_kwargs") nv.validate_window_func("mean", args, kwargs) deltas = None if self.times is None else self._deltas window_func = partial( window_aggregations.ewma, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=deltas, ) return self._apply(window_func) else: raise ValueError("engine must be either 'numba' or 'cython'") @doc( template_header, create_section_header("Parameters"), dedent( """ bias : bool, default False Use a standard estimation bias correction. """ ).replace("\n", "", 1), args_compat, kwargs_compat, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also[:-1], window_method="ewm", aggregation_description="(exponential weighted moment) standard deviation", agg_method="std", ) def std(self, bias: bool = False, *args, **kwargs): nv.validate_window_func("std", args, kwargs) return zsqrt(self.var(bias=bias, **kwargs)) def vol(self, bias: bool = False, *args, **kwargs): warnings.warn( ( "vol is deprecated will be removed in a future version. " "Use std instead." 
), FutureWarning, stacklevel=2, ) return self.std(bias, *args, **kwargs) @doc( template_header, create_section_header("Parameters"), dedent( """ bias : bool, default False Use a standard estimation bias correction. """ ).replace("\n", "", 1), args_compat, kwargs_compat, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also[:-1], window_method="ewm", aggregation_description="(exponential weighted moment) variance", agg_method="var", ) def var(self, bias: bool = False, *args, **kwargs): nv.validate_window_func("var", args, kwargs) window_func = window_aggregations.ewmcov wfunc = partial( window_func, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, bias=bias, ) def var_func(values, begin, end, min_periods): return wfunc(values, begin, end, min_periods, values) return self._apply(var_func) @doc( template_header, create_section_header("Parameters"), dedent( """ other : Series or DataFrame , optional If not supplied then will default to self and produce pairwise output. pairwise : bool, default None If False then only matching columns between self and other will be used and the output will be a DataFrame. If True then all pairwise combinations will be calculated and the output will be a MultiIndex DataFrame in the case of DataFrame inputs. In the case of missing elements, only complete pairwise observations will be used. bias : bool, default False Use a standard estimation bias correction. """ ).replace("\n", "", 1), kwargs_compat, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also[:-1], window_method="ewm", aggregation_description="(exponential weighted moment) sample covariance", agg_method="cov", ) def cov( self, other: DataFrame | Series | None = None, pairwise: bool | None = None, bias: bool = False, **kwargs, ): from pandas import Series def cov_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = ( self.min_periods if self.min_periods is not None else window_indexer.window_size ) start, end = window_indexer.get_window_bounds( num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, ) result = window_aggregations.ewmcov( x_array, start, end, # error: Argument 4 to "ewmcov" has incompatible type # "Optional[int]"; expected "int" self.min_periods, # type: ignore[arg-type] y_array, self._com, self.adjust, self.ignore_na, bias, ) return Series(result, index=x.index, name=x.name) return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func) @doc( template_header, create_section_header("Parameters"), dedent( """ other : Series or DataFrame, optional If not supplied then will default to self and produce pairwise output. pairwise : bool, default None If False then only matching columns between self and other will be used and the output will be a DataFrame. If True then all pairwise combinations will be calculated and the output will be a MultiIndex DataFrame in the case of DataFrame inputs. In the case of missing elements, only complete pairwise observations will be used. 
""" ).replace("\n", "", 1), kwargs_compat, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also[:-1], window_method="ewm", aggregation_description="(exponential weighted moment) sample correlation", agg_method="corr", ) def corr( self, other: DataFrame | Series | None = None, pairwise: bool | None = None, **kwargs, ): from pandas import Series def cov_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = ( self.min_periods if self.min_periods is not None else window_indexer.window_size ) start, end = window_indexer.get_window_bounds( num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, ) def _cov(X, Y): return window_aggregations.ewmcov( X, start, end, min_periods, Y, self._com, self.adjust, self.ignore_na, True, ) with np.errstate(all="ignore"): cov = _cov(x_array, y_array) x_var = _cov(x_array, x_array) y_var = _cov(y_array, y_array) result = cov / zsqrt(x_var * y_var) return Series(result, index=x.index, name=x.name) return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func) class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow): """ Provide an exponential moving window groupby implementation. """ _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes def __init__(self, obj, *args, _grouper=None, **kwargs): super().__init__(obj, *args, _grouper=_grouper, **kwargs) if not obj.empty and self.times is not None: # sort the times and recalculate the deltas according to the groups groupby_order = np.concatenate(list(self._grouper.indices.values())) self._deltas = _calculate_deltas( self.times.take(groupby_order), # type: ignore[union-attr] self.halflife, ) def _get_window_indexer(self) -> GroupbyIndexer: """ Return an indexer class that will compute the window start and end bounds Returns ------- GroupbyIndexer """ window_indexer = GroupbyIndexer( groupby_indicies=self._grouper.indices, window_indexer=ExponentialMovingWindowIndexer, ) return window_indexer class OnlineExponentialMovingWindow(ExponentialMovingWindow): def __init__( self, obj: NDFrame, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool = True, ignore_na: bool = False, axis: Axis = 0, times: str | np.ndarray | NDFrame | None = None, engine: str = "numba", engine_kwargs: dict[str, bool] | None = None, *, selection=None, ): if times is not None: raise NotImplementedError( "times is not implemented with online operations." ) super().__init__( obj=obj, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, selection=selection, ) self._mean = EWMMeanState( self._com, self.adjust, self.ignore_na, self.axis, obj.shape ) if maybe_use_numba(engine): self.engine = engine self.engine_kwargs = engine_kwargs else: raise ValueError("'numba' is the only supported engine") def reset(self): """ Reset the state captured by `update` calls. 
""" self._mean.reset() def aggregate(self, func, *args, **kwargs): return NotImplementedError def std(self, bias: bool = False, *args, **kwargs): return NotImplementedError def corr( self, other: DataFrame | Series | None = None, pairwise: bool | None = None, **kwargs, ): return NotImplementedError def cov( self, other: DataFrame | Series | None = None, pairwise: bool | None = None, bias: bool = False, **kwargs, ): return NotImplementedError def var(self, bias: bool = False, *args, **kwargs): return NotImplementedError def mean(self, *args, update=None, update_times=None, **kwargs): """ Calculate an online exponentially weighted mean. Parameters ---------- update: DataFrame or Series, default None New values to continue calculating the exponentially weighted mean from the last values and weights. Values should be float64 dtype. ``update`` needs to be ``None`` the first time the exponentially weighted mean is calculated. update_times: Series or 1-D np.ndarray, default None New times to continue calculating the exponentially weighted mean from the last values and weights. If ``None``, values are assumed to be evenly spaced in time. This feature is currently unsupported. Returns ------- DataFrame or Series Examples -------- >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)}) >>> online_ewm = df.head(2).ewm(0.5).online() >>> online_ewm.mean() a b 0 0.00 5.00 1 0.75 5.75 >>> online_ewm.mean(update=df.tail(3)) a b 2 1.615385 6.615385 3 2.550000 7.550000 4 3.520661 8.520661 >>> online_ewm.reset() >>> online_ewm.mean() a b 0 0.00 5.00 1 0.75 5.75 """ result_kwargs = {} is_frame = True if self._selected_obj.ndim == 2 else False if update_times is not None: raise NotImplementedError("update_times is not implemented.") else: update_deltas = np.ones( max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64 ) if update is not None: if self._mean.last_ewm is None: raise ValueError( "Must call mean with update=None first before passing update" ) result_from = 1 result_kwargs["index"] = update.index if is_frame: last_value = self._mean.last_ewm[np.newaxis, :] result_kwargs["columns"] = update.columns else: last_value = self._mean.last_ewm result_kwargs["name"] = update.name np_array = np.concatenate((last_value, update.to_numpy())) else: result_from = 0 result_kwargs["index"] = self._selected_obj.index if is_frame: result_kwargs["columns"] = self._selected_obj.columns else: result_kwargs["name"] = self._selected_obj.name np_array = self._selected_obj.astype(np.float64).to_numpy() ewma_func = generate_online_numba_ewma_func(self.engine_kwargs) result = self._mean.run_ewm( np_array if is_frame else np_array[:, np.newaxis], update_deltas, self.min_periods, ewma_func, ) if not is_frame: result = result.squeeze() result = result[result_from:] result = self._selected_obj._constructor(result, **result_kwargs) return result
33.889625
88
0.565203
[ "BSD-3-Clause" ]
DrGFreeman/pandas
pandas/core/window/ewm.py
30,704
Python
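`get_center_of_mass` treats `com`, `span`, `halflife` and `alpha` as alternative parameterizations of a single decay rate, so equivalent settings must produce identical means. A small sanity check against `ewm` (values chosen arbitrarily):

import numpy as np
import pandas as pd

s = pd.Series([0.0, 1.0, 2.0, np.nan, 4.0])

# alpha = 2 / (span + 1) and com = (1 - alpha) / alpha, so span=3 <=> com=1
assert np.allclose(s.ewm(span=3).mean(), s.ewm(com=1.0).mean())

# with a plain float halflife: alpha = 1 - exp(ln(0.5) / halflife)
halflife = 2.0
alpha = 1 - np.exp(np.log(0.5) / halflife)
assert np.allclose(s.ewm(halflife=halflife).mean(), s.ewm(alpha=alpha).mean())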
# -*- coding: utf-8 -*- """ Production Configurations - Use Amazon's S3 for storing static files and uploaded media - Use mailgun to send emails - Use Redis for cache """ from __future__ import absolute_import, unicode_literals from django.utils import six from .common import * # noqa # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ SECRET_KEY = env('DJANGO_SECRET_KEY') # This ensures that Django will be able to detect a secure connection # properly on Heroku. SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Use Whitenoise to serve static files # See: https://whitenoise.readthedocs.io/ WHITENOISE_MIDDLEWARE = ('whitenoise.middleware.WhiteNoiseMiddleware', ) MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE # SECURITY CONFIGURATION # ------------------------------------------------------------------------------ # See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security # and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy # set this to 60 seconds and then to 518400 when you can prove it works SECURE_HSTS_SECONDS = 60 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool( 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True) SECURE_CONTENT_TYPE_NOSNIFF = env.bool( 'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True) SECURE_BROWSER_XSS_FILTER = True SESSION_COOKIE_SECURE = True SESSION_COOKIE_HTTPONLY = True SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True X_FRAME_OPTIONS = 'DENY' # SITE CONFIGURATION # ------------------------------------------------------------------------------ # Hosts/domain names that are valid for this site # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com']) # END SITE CONFIGURATION INSTALLED_APPS += ('gunicorn', ) # STORAGE CONFIGURATION # ------------------------------------------------------------------------------ # Uploaded Media Files # ------------------------ # See: http://django-storages.readthedocs.io/en/latest/index.html INSTALLED_APPS += ( 'storages', ) # Static Assets # ------------------------ # STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' # EMAIL # ------------------------------------------------------------------------------ DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='course_portal <[email protected]>') EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[course_portal] ') SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL) # TEMPLATE CONFIGURATION # ------------------------------------------------------------------------------ # See: # https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader TEMPLATES[0]['OPTIONS']['loaders'] = [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]), ] # DATABASE CONFIGURATION # ------------------------------------------------------------------------------ # Use the Heroku-style specification # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ 
DATABASES['default'] = env.db('DATABASE_URL')

# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')

# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
37.180952
106
0.6396
[ "MIT" ]
c-rhodes/course_portal
config/settings/production.py
3,904
Python
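These settings lean on `django-environ`: each `env(...)` call reads an environment variable, and required ones (no `default=`) raise `ImproperlyConfigured` when missing. A minimal sketch of the pattern, reusing variable names from the settings above (the fallback secret is a placeholder for illustration only):

import os

import environ  # django-environ

os.environ.setdefault('DJANGO_SECRET_KEY', 'placeholder-for-illustration')

env = environ.Env()

SECRET_KEY = env('DJANGO_SECRET_KEY')                  # required: raises if unset
SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT',  # optional with a default
                        default=True)
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])

print(SECRET_KEY, SSL_REDIRECT, ALLOWED_HOSTS)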
import torch from mmcv.cnn import ConvModule from torch import nn as nn from mmdet3d.models.builder import build_loss class VoteModule(nn.Module): """Vote module. Generate votes from seed point features. Args: in_channels (int): Number of channels of seed point features. vote_per_seed (int): Number of votes generated from each seed point. gt_per_seed (int): Number of ground truth votes generated from each seed point. conv_channels (tuple[int]): Out channels of vote generating convolution. conv_cfg (dict): Config of convolution. Default: dict(type='Conv1d'). norm_cfg (dict): Config of normalization. Default: dict(type='BN1d'). norm_feats (bool): Whether to normalize features. Default: True. vote_loss (dict): Config of vote loss. """ def __init__(self, in_channels, vote_per_seed=1, gt_per_seed=3, conv_channels=(16, 16), conv_cfg=dict(type='Conv1d'), norm_cfg=dict(type='BN1d'), norm_feats=True, vote_loss=None): super().__init__() self.in_channels = in_channels self.vote_per_seed = vote_per_seed self.gt_per_seed = gt_per_seed self.norm_feats = norm_feats self.vote_loss = build_loss(vote_loss) prev_channels = in_channels vote_conv_list = list() for k in range(len(conv_channels)): vote_conv_list.append( ConvModule( prev_channels, conv_channels[k], 1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=True, inplace=True)) prev_channels = conv_channels[k] self.vote_conv = nn.Sequential(*vote_conv_list) # conv_out predicts coordinate and residual features out_channel = (3 + in_channels) * self.vote_per_seed self.conv_out = nn.Conv1d(prev_channels, out_channel, 1) def forward(self, seed_points, seed_feats): """forward. Args: seed_points (torch.Tensor): Coordinate of the seed points in shape (B, N, 3). seed_feats (torch.Tensor): Features of the seed points in shape (B, C, N). Returns: tuple[torch.Tensor]: - vote_points: Voted xyz based on the seed points \ with shape (B, M, 3), ``M=num_seed*vote_per_seed``. - vote_features: Voted features based on the seed points with \ shape (B, C, M) where ``M=num_seed*vote_per_seed``, \ ``C=vote_feature_dim``. """ batch_size, feat_channels, num_seed = seed_feats.shape num_vote = num_seed * self.vote_per_seed x = self.vote_conv(seed_feats) # (batch_size, (3+out_dim)*vote_per_seed, num_seed) votes = self.conv_out(x) votes = votes.transpose(2, 1).view(batch_size, num_seed, self.vote_per_seed, -1) offset = votes[:, :, :, 0:3] res_feats = votes[:, :, :, 3:] vote_points = (seed_points.unsqueeze(2) + offset).contiguous() vote_points = vote_points.view(batch_size, num_vote, 3) vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) + res_feats).contiguous() vote_feats = vote_feats.view(batch_size, num_vote, feat_channels).transpose(2, 1).contiguous() if self.norm_feats: features_norm = torch.norm(vote_feats, p=2, dim=1) vote_feats = vote_feats.div(features_norm.unsqueeze(1)) return vote_points, vote_feats def get_loss(self, seed_points, vote_points, seed_indices, vote_targets_mask, vote_targets): """Calculate loss of voting module. Args: seed_points (torch.Tensor): Coordinate of the seed points. vote_points (torch.Tensor): Coordinate of the vote points. seed_indices (torch.Tensor): Indices of seed points in raw points. vote_targets_mask (torch.Tensor): Mask of valid vote targets. vote_targets (torch.Tensor): Targets of votes. Returns: torch.Tensor: Weighted vote loss. 
""" batch_size, num_seed = seed_points.shape[:2] seed_gt_votes_mask = torch.gather(vote_targets_mask, 1, seed_indices).float() seed_indices_expand = seed_indices.unsqueeze(-1).repeat( 1, 1, 3 * self.gt_per_seed) seed_gt_votes = torch.gather(vote_targets, 1, seed_indices_expand) seed_gt_votes += seed_points.repeat(1, 1, 3) weight = seed_gt_votes_mask / (torch.sum(seed_gt_votes_mask) + 1e-6) distance = self.vote_loss( vote_points.view(batch_size * num_seed, -1, 3), seed_gt_votes.view(batch_size * num_seed, -1, 3), dst_weight=weight.view(batch_size * num_seed, 1))[1] vote_loss = torch.sum(torch.min(distance, dim=1)[0]) return vote_loss
38.690647
79
0.57289
[ "Apache-2.0" ]
BOURSa/mmdetection3d
mmdet3d/models/model_utils/vote_module.py
5,378
Python
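The subtle step in `VoteModule.forward` is the reshape: each seed emits `vote_per_seed` votes, so the conv output of shape (B, (3 + C) * vote_per_seed, N) is unpacked into per-vote offsets and residual features. A shape-only sketch with dummy tensors:

import torch

batch_size, feat_channels, num_seed, vote_per_seed = 2, 8, 16, 3

# dummy output of self.conv_out: (B, (3 + C) * vote_per_seed, N)
votes = torch.randn(batch_size, (3 + feat_channels) * vote_per_seed, num_seed)

votes = votes.transpose(2, 1).view(batch_size, num_seed, vote_per_seed, -1)
offset, res_feats = votes[..., 0:3], votes[..., 3:]  # per-vote xyz offset / features

seed_points = torch.randn(batch_size, num_seed, 3)
vote_points = (seed_points.unsqueeze(2) + offset).contiguous()
vote_points = vote_points.view(batch_size, num_seed * vote_per_seed, 3)

print(offset.shape)       # torch.Size([2, 16, 3, 3])
print(res_feats.shape)    # torch.Size([2, 16, 3, 8])
print(vote_points.shape)  # torch.Size([2, 48, 3])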
"""Import/export any formats supported by meshio.""" import meshio import numpy as np import skfem MESH_TYPE_MAPPING = { 'tetra': skfem.MeshTet1, 'tetra10': skfem.MeshTet2, 'hexahedron': skfem.MeshHex1, 'hexahedron27': skfem.MeshHex2, 'wedge': skfem.MeshWedge1, 'triangle': skfem.MeshTri1, 'triangle6': skfem.MeshTri2, 'quad': skfem.MeshQuad1, 'quad9': skfem.MeshQuad2, 'line': skfem.MeshLine1, } BOUNDARY_TYPE_MAPPING = { 'line': 'vertex', 'triangle': 'line', 'quad': 'line', 'tetra': 'triangle', 'hexahedron': 'quad', 'tetra10': 'triangle', # TODO support quadratic facets 'triangle6': 'line', # TODO 'quad9': 'line', # TODO 'hexahedron27': 'quad', # TODO } TYPE_MESH_MAPPING = {MESH_TYPE_MAPPING[k]: k for k in dict(reversed(list(MESH_TYPE_MAPPING.items())))} HEX_MAPPING = [0, 3, 6, 2, 1, 5, 7, 4, 10, 16, 14, 9, 12, 18, 17, 11, 8, 15, 19, 13, 20, 25, 22, 23, 21, 24, 26] INV_HEX_MAPPING = [HEX_MAPPING.index(i) for i in range(len(HEX_MAPPING))] def from_meshio(m, out=None, int_data_to_sets=False, force_meshio_type=None): cells = m.cells_dict meshio_type = None if force_meshio_type is None: # detect 3D for k in cells: if k in {'tetra', 'hexahedron', 'tetra10', 'hexahedron27', 'wedge'}: meshio_type = k break if meshio_type is None: # detect 2D for k in cells: if k in {'triangle', 'quad', 'triangle6', 'quad9'}: meshio_type = k break if meshio_type is None: # detect 1D for k in cells: if k == 'line': meshio_type = k break else: meshio_type = force_meshio_type if meshio_type is None: raise NotImplementedError("Mesh type(s) not supported " "in import: {}.".format(cells.keys())) mesh_type = MESH_TYPE_MAPPING[meshio_type] # create p and t p = np.ascontiguousarray(mesh_type.strip_extra_coordinates(m.points).T) t = np.ascontiguousarray(cells[meshio_type].T) # reorder t if needed if meshio_type == 'hexahedron': t = t[INV_HEX_MAPPING[:8]] elif meshio_type == 'hexahedron27': t = t[INV_HEX_MAPPING] if int_data_to_sets: m.int_data_to_sets() subdomains = {} boundaries = {} # parse any subdomains from cell_sets if m.cell_sets: subdomains = {k: v[meshio_type] for k, v in m.cell_sets_dict.items() if meshio_type in v} # create temporary mesh for matching boundary elements mtmp = mesh_type(p, t) bnd_type = BOUNDARY_TYPE_MAPPING[meshio_type] # parse boundaries from cell_sets if m.cell_sets and bnd_type in m.cells_dict: facets = { k: [tuple(f) for f in np.sort(m.cells_dict[bnd_type][v[bnd_type]])] for k, v in m.cell_sets_dict.items() if bnd_type in v and k.split(":")[0] != "gmsh" } boundaries = {k: np.array([i for i, f in enumerate(map(tuple, mtmp.facets.T)) if f in v]) for k, v in facets.items()} # MSH 2.2 tag parsing if m.cell_data and m.field_data: try: elements_tag = m.cell_data_dict['gmsh:physical'][meshio_type] subdomains = {} tags = np.unique(elements_tag) def find_tagname(tag): for key in m.field_data: if m.field_data[key][0] == tag: return key return None for tag in tags: t_set = np.nonzero(tag == elements_tag)[0] subdomains[find_tagname(tag)] = t_set # find tagged boundaries if bnd_type in m.cell_data_dict['gmsh:physical']: facets = m.cells_dict[bnd_type] facets_tag = m.cell_data_dict['gmsh:physical'][bnd_type] # put meshio facets to dict dic = {tuple(np.sort(facets[i])): facets_tag[i] for i in range(facets.shape[0])} # get index of corresponding Mesh.facets for each meshio # facet found in the dict index = np.array([[dic[tuple(np.sort(mtmp.facets[:, i]))], i] for i in mtmp.boundary_facets() if tuple(np.sort(mtmp.facets[:, i])) in dic]) # read meshio tag numbers and names tags = index[:, 0] boundaries = {} for 
tag in np.unique(tags): tagindex = np.nonzero(tags == tag)[0] boundaries[find_tagname(tag)] = index[tagindex, 1] except Exception: pass # attempt parsing skfem tags if m.cell_data: _boundaries, _subdomains = mtmp._decode_cell_data(m.cell_data) boundaries.update(_boundaries) subdomains.update(_subdomains) # export mesh data if out is not None and isinstance(out, list): for i, field in enumerate(out): out[i] = getattr(m, field) return mesh_type( p, t, None if len(boundaries) == 0 else boundaries, None if len(subdomains) == 0 else subdomains, ) def from_file(filename, out, **kwargs): return from_meshio(meshio.read(filename), out, **kwargs) def to_meshio(mesh, point_data=None, cell_data=None, encode_cell_data=True, encode_point_data=False): t = mesh.dofs.element_dofs.copy() if isinstance(mesh, skfem.MeshHex2): t = t[HEX_MAPPING] elif isinstance(mesh, skfem.MeshHex): t = t[HEX_MAPPING[:8]] mtype = TYPE_MESH_MAPPING[type(mesh)] cells = {mtype: t.T} if encode_cell_data: if cell_data is None: cell_data = {} cell_data.update(mesh._encode_cell_data()) if encode_point_data: if point_data is None: point_data = {} point_data.update(mesh._encode_point_data()) mio = meshio.Mesh( mesh.p.T, cells, point_data=point_data, cell_data=cell_data, ) return mio def to_file(mesh, filename, point_data=None, cell_data=None, encode_cell_data=True, encode_point_data=False, **kwargs): meshio.write(filename, to_meshio(mesh, point_data, cell_data, encode_cell_data, encode_point_data), **kwargs)
29.181818
79
0.527329
[ "BSD-3-Clause" ]
bhaveshshrimali/scikit-fem
skfem/io/meshio.py
7,062
Python
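The two entry points compose into a round trip: `to_meshio` builds a `meshio.Mesh` from a scikit-fem mesh, and `from_meshio` reads one back. A sketch assuming a recent scikit-fem; exact equality holds here only because nothing is serialized to disk:

import skfem
from skfem.io.meshio import from_meshio, to_meshio

m = skfem.MeshTri()          # default two-triangle unit square
mio = to_meshio(m)           # meshio.Mesh with a 'triangle' cell block
m2 = from_meshio(mio)

print(type(m2).__name__)                         # MeshTri1
print((m.p == m2.p).all(), (m.t == m2.t).all())  # True True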
from __future__ import annotations from abc import abstractmethod, ABC from decimal import Decimal from enum import Enum from typing import Dict, cast import numpy as np # A few extra general types from slim.simulation.lice_population import LicePopulation, GenoDistrib, GenoTreatmentValue,\ Alleles, GenoTreatmentDistrib Money = Decimal class Treatment(Enum): """ A stub for treatment types TODO: add other treatments here """ EMB = 0 THERMOLICER = 1 class GeneticMechanism(Enum): """ Genetic mechanism to be used when generating egg genotypes """ DISCRETE = 1 MATERNAL = 2 class HeterozygousResistance(Enum): """ Resistance in a monogenic, heterozygous setting. """ DOMINANT = 1 INCOMPLETELY_DOMINANT = 2 RECESSIVE = 3 TreatmentResistance = Dict[HeterozygousResistance, float] class TreatmentParams(ABC): """ Abstract class for all the treatments """ name = "" def __init__(self, payload): self.quadratic_fish_mortality_coeffs = np.array(payload["quadratic_fish_mortality_coeffs"]) self.effect_delay: int = payload["effect_delay"] self.application_period: int = payload["application_period"] @staticmethod def parse_pheno_resistance(pheno_resistance_dict: dict) -> TreatmentResistance: return {HeterozygousResistance[key.upper()]: val for key, val in pheno_resistance_dict.items()} def __get_mortality_pp_increase(self, temperature: float, fish_mass: float) -> float: """Get the mortality percentage point difference increase. :param temperature: the temperature in Celsius :param fish_mass: the fish mass (in grams) :returns: Mortality percentage point difference increase """ # TODO: is this the right way to solve this? fish_mass_indicator = 1 if fish_mass > 2000 else 0 input = np.array([1, temperature, fish_mass_indicator, temperature ** 2, temperature * fish_mass_indicator, fish_mass_indicator ** 2]) return max(float(self.quadratic_fish_mortality_coeffs.dot(input)), 0) @abstractmethod def delay(self, average_temperature: float): # pragma: no cover """ Delay before treatment should have a noticeable effect """ @staticmethod def get_allele_heterozygous_trait(alleles: Alleles): """ Get the allele heterozygous type """ # should we move this? 
if 'A' in alleles: if 'a' in alleles: trait = HeterozygousResistance.INCOMPLETELY_DOMINANT else: trait = HeterozygousResistance.DOMINANT else: trait = HeterozygousResistance.RECESSIVE return trait @abstractmethod def get_lice_treatment_mortality_rate( self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib: """ Calculate the mortality rates of this treatment """ def get_fish_mortality_occurrences( self, temperature: float, fish_mass: float, num_fish: float, efficacy_window: float, mortality_events: int ): """Get the number of fish that die due to treatment :param temperature: the temperature of the cage :param num_fish: the number of fish :param fish_mass: the average fish mass (in grams) :param efficacy_window: the length of the efficacy window :param mortality_events: the number of fish mortality events to subtract from """ predicted_pp_increase = self.__get_mortality_pp_increase(temperature, fish_mass) mortality_events_pp = 100 * mortality_events / num_fish predicted_deaths = ((predicted_pp_increase + mortality_events_pp) * num_fish / 100) \ - mortality_events predicted_deaths /= efficacy_window return predicted_deaths class ChemicalTreatment(TreatmentParams): """Trait for all chemical treatments""" def __init__(self, payload): super().__init__(payload) self.pheno_resistance = self.parse_pheno_resistance(payload["pheno_resistance"]) self.price_per_kg = Money(payload["price_per_kg"]) self.durability_temp_ratio: float = payload["durability_temp_ratio"] class ThermalTreatment(TreatmentParams): """Trait for all thermal-based treatments""" def __init__(self, payload): super().__init__(payload) self.price_per_application = Money(payload["price_per_application"]) # NOTE: these are currently unused # self.exposure_temperature: float = payload["exposure_temperature"] # self.exposure_length: float = payload["efficacy"] class EMB(ChemicalTreatment): """Emamectin Benzoate""" name = "EMB" def delay(self, average_temperature: float): return self.durability_temp_ratio / average_temperature def get_lice_treatment_mortality_rate(self, lice_population: LicePopulation, _temperature=None): susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in LicePopulation.susceptible_stages] num_susc_per_geno = GenoDistrib.batch_sum(susceptible_populations) geno_treatment_distrib = {geno: GenoTreatmentValue(0.0, 0) for geno in num_susc_per_geno} for geno, num_susc in num_susc_per_geno.items(): trait = self.get_allele_heterozygous_trait(geno) susceptibility_factor = 1.0 - self.pheno_resistance[trait] geno_treatment_distrib[geno] = GenoTreatmentValue(susceptibility_factor, cast(int, num_susc)) return geno_treatment_distrib class Thermolicer(ThermalTreatment): name = "Thermolicer" def delay(self, _): return 1 # effects noticeable the next day def get_lice_treatment_mortality_rate( self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib: if temperature >= 12: efficacy = 0.8 else: efficacy = 0.99 susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in LicePopulation.susceptible_stages] num_susc_per_geno = cast(GenoDistrib, GenoDistrib.batch_sum(susceptible_populations)) geno_treatment_distrib = {geno: GenoTreatmentValue(efficacy, cast(int, num_susc)) for geno, num_susc in num_susc_per_geno.items()} return geno_treatment_distrib
34.544503
115
0.677933
[ "MIT" ]
magicicada/slim
slim/types/TreatmentTypes.py
6,598
Python
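The quadratic fish-mortality model above reduces to a dot product against six polynomial features. A minimal self-contained sketch of that calculation, using hypothetical coefficients (the real ones come from the treatment payload's "quadratic_fish_mortality_coeffs" entry):

import numpy as np

# Hypothetical coefficients for illustration only; slim reads these from
# the "quadratic_fish_mortality_coeffs" entry of the treatment payload.
COEFFS = np.array([0.1, 0.02, 0.5, 0.001, 0.01, 0.3])

def mortality_pp_increase(temperature: float, fish_mass: float) -> float:
    """Mirror of TreatmentParams.__get_mortality_pp_increase."""
    indicator = 1 if fish_mass > 2000 else 0  # heavy-fish indicator (> 2 kg)
    features = np.array([1, temperature, indicator,
                         temperature ** 2, temperature * indicator,
                         indicator ** 2])
    # Clamp at zero: the model never decreases mortality.
    return max(float(COEFFS.dot(features)), 0)

print(mortality_pp_increase(14.0, 2500.0))  # heavy fish at 14 degrees C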
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for L{twisted.logger._json}.
"""

from io import BytesIO, StringIO
from typing import IO, Any, List, Optional, Sequence, cast

from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
from zope.interface.verify import verifyObject

from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
from .._flatten import extractField
from .._format import formatEvent
from .._global import globalLogPublisher
from .._interfaces import ILogObserver, LogEvent
from .._json import (
    eventAsJSON,
    eventFromJSON,
    eventsFromJSONLogFile,
    jsonFileLogObserver,
    log as jsonLog,
)
from .._levels import LogLevel
from .._logger import Logger
from .._observer import LogPublisher


def savedJSONInvariants(testCase: TestCase, savedJSON: str) -> str:
    """
    Assert a few things about the result of L{eventAsJSON}, then return it.

    @param testCase: The L{TestCase} with which to perform the assertions.
    @param savedJSON: The result of L{eventAsJSON}.

    @return: C{savedJSON}

    @raise AssertionError: If any of the preconditions fail.
    """
    testCase.assertIsInstance(savedJSON, str)
    testCase.assertEqual(savedJSON.count("\n"), 0)
    return savedJSON


class SaveLoadTests(TestCase):
    """
    Tests for loading and saving log events.
    """

    def savedEventJSON(self, event: LogEvent) -> str:
        """
        Serialize an event, assert some things about it, and return the JSON.

        @param event: An event.

        @return: JSON.
        """
        return savedJSONInvariants(self, eventAsJSON(event))

    def test_simpleSaveLoad(self) -> None:
        """
        Saving and loading an empty dictionary results in an empty dictionary.
        """
        self.assertEqual(eventFromJSON(self.savedEventJSON({})), {})

    def test_saveLoad(self) -> None:
        """
        Saving and loading a dictionary with some simple values in it results
        in those same simple values in the output; according to JSON's rules,
        though, all dictionary keys must be L{str} and any non-L{str} keys
        will be converted.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({1: 2, "3": "4"})),  # type: ignore[dict-item]
            {"1": 2, "3": "4"},
        )

    def test_saveUnPersistable(self) -> None:
        """
        Saving and loading an object which cannot be represented in JSON will
        result in a placeholder.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({"1": 2, "3": object()})),
            {"1": 2, "3": {"unpersistable": True}},
        )

    def test_saveNonASCII(self) -> None:
        """
        Non-ASCII keys and values can be saved and loaded.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({"\u1234": "\u4321", "3": object()})),
            {"\u1234": "\u4321", "3": {"unpersistable": True}},
        )

    def test_saveBytes(self) -> None:
        """
        Any L{bytes} objects will be saved as if they are latin-1 so they can
        be faithfully re-loaded.
        """
        inputEvent = {"hello": bytes(range(255))}
        # On Python 3, bytes keys will be skipped by the JSON encoder. Not
        # much we can do about that. Let's make sure that we don't get an
        # error, though.
        inputEvent.update({b"skipped": "okay"})  # type: ignore[dict-item]
        self.assertEqual(
            eventFromJSON(self.savedEventJSON(inputEvent)),
            {"hello": bytes(range(255)).decode("charmap")},
        )

    def test_saveUnPersistableThenFormat(self) -> None:
        """
        Saving and loading an object which cannot be represented in JSON, but
        has a string representation which I{can} be saved as JSON, will result
        in the same string formatting; any extractable fields will retain their
        data types.
""" class Reprable: def __init__(self, value: object) -> None: self.value = value def __repr__(self) -> str: return "reprable" inputEvent = {"log_format": "{object} {object.value}", "object": Reprable(7)} outputEvent = eventFromJSON(self.savedEventJSON(inputEvent)) self.assertEqual(formatEvent(outputEvent), "reprable 7") def test_extractingFieldsPostLoad(self) -> None: """ L{extractField} can extract fields from an object that's been saved and loaded from JSON. """ class Obj: def __init__(self) -> None: self.value = 345 inputEvent = dict(log_format="{object.value}", object=Obj()) loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent)) self.assertEqual(extractField("object.value", loadedEvent), 345) # The behavior of extractField is consistent between pre-persistence # and post-persistence events, although looking up the key directly # won't be: self.assertRaises(KeyError, extractField, "object", loadedEvent) self.assertRaises(KeyError, extractField, "object", inputEvent) def test_failureStructurePreserved(self) -> None: """ Round-tripping a failure through L{eventAsJSON} preserves its class and structure. """ events: List[LogEvent] = [] log = Logger(observer=cast(ILogObserver, events.append)) try: 1 / 0 except ZeroDivisionError: f = Failure() log.failure("a message about failure", f) self.assertEqual(len(events), 1) loaded = eventFromJSON(self.savedEventJSON(events[0]))["log_failure"] self.assertIsInstance(loaded, Failure) self.assertTrue(loaded.check(ZeroDivisionError)) self.assertIsInstance(loaded.getTraceback(), str) def test_saveLoadLevel(self) -> None: """ It's important that the C{log_level} key remain a L{constantly.NamedConstant} object. """ inputEvent = dict(log_level=LogLevel.warn) loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent)) self.assertIs(loadedEvent["log_level"], LogLevel.warn) def test_saveLoadUnknownLevel(self) -> None: """ If a saved bit of JSON (let's say, from a future version of Twisted) were to persist a different log_level, it will resolve as None. """ loadedEvent = eventFromJSON( '{"log_level": {"name": "other", ' '"__class_uuid__": "02E59486-F24D-46AD-8224-3ACDF2A5732A"}}' ) self.assertEqual(loadedEvent, dict(log_level=None)) class FileLogObserverTests(TestCase): """ Tests for L{jsonFileLogObserver}. """ def test_interface(self) -> None: """ A L{FileLogObserver} returned by L{jsonFileLogObserver} is an L{ILogObserver}. """ with StringIO() as fileHandle: observer = jsonFileLogObserver(fileHandle) try: verifyObject(ILogObserver, observer) except BrokenMethodImplementation as e: self.fail(e) def assertObserverWritesJSON(self, recordSeparator: str = "\x1e") -> None: """ Asserts that an observer created by L{jsonFileLogObserver} with the given arguments writes events serialized as JSON text, using the given record separator. @param recordSeparator: C{recordSeparator} argument to L{jsonFileLogObserver} """ with StringIO() as fileHandle: observer = jsonFileLogObserver(fileHandle, recordSeparator) event = dict(x=1) observer(event) self.assertEqual(fileHandle.getvalue(), f'{recordSeparator}{{"x": 1}}\n') def test_observeWritesDefaultRecordSeparator(self) -> None: """ A L{FileLogObserver} created by L{jsonFileLogObserver} writes events serialzed as JSON text to a file when it observes events. By default, the record separator is C{"\\x1e"}. 
""" self.assertObserverWritesJSON() def test_observeWritesEmptyRecordSeparator(self) -> None: """ A L{FileLogObserver} created by L{jsonFileLogObserver} writes events serialzed as JSON text to a file when it observes events. This test sets the record separator to C{""}. """ self.assertObserverWritesJSON(recordSeparator="") def test_failureFormatting(self) -> None: """ A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures serialized as JSON text to a file when it observes events. """ io = StringIO() publisher = LogPublisher() logged: List[LogEvent] = [] publisher.addObserver(cast(ILogObserver, logged.append)) publisher.addObserver(jsonFileLogObserver(io)) logger = Logger(observer=publisher) try: 1 / 0 except BaseException: logger.failure("failed as expected") reader = StringIO(io.getvalue()) deserialized = list(eventsFromJSONLogFile(reader)) def checkEvents(logEvents: Sequence[LogEvent]) -> None: self.assertEqual(len(logEvents), 1) [failureEvent] = logEvents self.assertIn("log_failure", failureEvent) failureObject = failureEvent["log_failure"] self.assertIsInstance(failureObject, Failure) tracebackObject = failureObject.getTracebackObject() self.assertEqual( tracebackObject.tb_frame.f_code.co_filename.rstrip("co"), __file__.rstrip("co"), ) checkEvents(logged) checkEvents(deserialized) class LogFileReaderTests(TestCase): """ Tests for L{eventsFromJSONLogFile}. """ def setUp(self) -> None: self.errorEvents: List[LogEvent] = [] @implementer(ILogObserver) def observer(event: LogEvent) -> None: if event["log_namespace"] == jsonLog.namespace and "record" in event: self.errorEvents.append(event) self.logObserver = observer globalLogPublisher.addObserver(observer) def tearDown(self) -> None: globalLogPublisher.removeObserver(self.logObserver) def _readEvents( self, inFile: IO[Any], recordSeparator: Optional[str] = None, bufferSize: int = 4096, ) -> None: """ Test that L{eventsFromJSONLogFile} reads two pre-defined events from a file: C{{"x": 1}} and C{{"y": 2}}. @param inFile: C{inFile} argument to L{eventsFromJSONLogFile} @param recordSeparator: C{recordSeparator} argument to L{eventsFromJSONLogFile} @param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile} """ events = iter(eventsFromJSONLogFile(inFile, recordSeparator, bufferSize)) self.assertEqual(next(events), {"x": 1}) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events def test_readEventsAutoWithRecordSeparator(self) -> None: """ L{eventsFromJSONLogFile} reads events from a file and automatically detects use of C{"\\x1e"} as the record separator. """ with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle: self._readEvents(fileHandle) self.assertEqual(len(self.errorEvents), 0) def test_readEventsAutoEmptyRecordSeparator(self) -> None: """ L{eventsFromJSONLogFile} reads events from a file and automatically detects use of C{""} as the record separator. """ with StringIO('{"x": 1}\n' '{"y": 2}\n') as fileHandle: self._readEvents(fileHandle) self.assertEqual(len(self.errorEvents), 0) def test_readEventsExplicitRecordSeparator(self) -> None: """ L{eventsFromJSONLogFile} reads events from a file and is told to use a specific record separator. """ # Use "\x08" (backspace)... because that seems weird enough. 
with StringIO('\x08{"x": 1}\n' '\x08{"y": 2}\n') as fileHandle: self._readEvents(fileHandle, recordSeparator="\x08") self.assertEqual(len(self.errorEvents), 0) def test_readEventsPartialBuffer(self) -> None: """ L{eventsFromJSONLogFile} handles buffering a partial event. """ with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle: # Use a buffer size smaller than the event text. self._readEvents(fileHandle, bufferSize=1) self.assertEqual(len(self.errorEvents), 0) def test_readTruncated(self) -> None: """ If the JSON text for a record is truncated, skip it. """ with StringIO('\x1e{"x": 1' '\x1e{"y": 2}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to read truncated JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1') def test_readUnicode(self) -> None: """ If the file being read from vends L{str}, strings decode from JSON as-is. """ # The Euro currency sign is "\u20ac" with StringIO('\x1e{"currency": "\u20ac"}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"currency": "\u20ac"}) self.assertRaises(StopIteration, next, events) # No more events self.assertEqual(len(self.errorEvents), 0) def test_readUTF8Bytes(self) -> None: """ If the file being read from vends L{bytes}, strings decode from JSON as UTF-8. """ # The Euro currency sign is b"\xe2\x82\xac" in UTF-8 with BytesIO(b'\x1e{"currency": "\xe2\x82\xac"}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) # The Euro currency sign is "\u20ac" self.assertEqual(next(events), {"currency": "\u20ac"}) self.assertRaises(StopIteration, next, events) # No more events self.assertEqual(len(self.errorEvents), 0) def test_readTruncatedUTF8Bytes(self) -> None: """ If the JSON text for a record is truncated in the middle of a two-byte Unicode codepoint, we don't want to see a codec exception and the stream is read properly when the additional data arrives. """ # The Euro currency sign is "\u20ac" and encodes in UTF-8 as three # bytes: b"\xe2\x82\xac". with BytesIO(b'\x1e{"x": "\xe2\x82\xac"}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle, bufferSize=8)) self.assertEqual(next(events), {"x": "\u20ac"}) # Got text self.assertRaises(StopIteration, next, events) # No more events self.assertEqual(len(self.errorEvents), 0) def test_readInvalidUTF8Bytes(self) -> None: """ If the JSON text for a record contains invalid UTF-8 text, ignore that record. """ # The string b"\xe2\xac" is bogus with BytesIO(b'\x1e{"x": "\xe2\xac"}\n' b'\x1e{"y": 2}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to decode UTF-8 for JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": "\xe2\xac"}\n') def test_readInvalidJSON(self) -> None: """ If the JSON text for a record is invalid, skip it. 
""" with StringIO('\x1e{"x": }\n' '\x1e{"y": 2}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to read JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": }\n') def test_readUnseparated(self) -> None: """ Multiple events without a record separator are skipped. """ with StringIO('\x1e{"x": 1}\n' '{"y": 2}\n') as fileHandle: events = eventsFromJSONLogFile(fileHandle) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to read JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1}\n{"y": 2}\n') def test_roundTrip(self) -> None: """ Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver} and read by L{eventsFromJSONLogFile} is reconstructed properly. """ event = dict(x=1) with StringIO() as fileHandle: observer = jsonFileLogObserver(fileHandle) observer(event) fileHandle.seek(0) events = eventsFromJSONLogFile(fileHandle) self.assertEqual(tuple(events), (event,)) self.assertEqual(len(self.errorEvents), 0)
37.600823
92
0.613002
[ "MIT" ]
Chinmoy-Prasad-Dutta/scrapy_scraper
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
18,274
Python
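The record-separator framing these tests exercise is easy to reproduce with the standard library. A rough sketch of reading "\x1e"-separated JSON records and skipping unparsable ones, in the spirit of eventsFromJSONLogFile (an illustration, not Twisted's buffered implementation):

import json
from io import StringIO

def read_rs_json(stream, record_separator="\x1e"):
    """Yield one dict per record, skipping records that fail to parse."""
    for chunk in stream.read().split(record_separator):
        chunk = chunk.strip()
        if not chunk:
            continue
        try:
            yield json.loads(chunk)
        except ValueError:
            # Twisted logs the lost record instead of raising.
            continue

log = StringIO('\x1e{"x": 1}\n\x1e{"y": 2}\n\x1e{"z": }\n')
print(list(read_rs_json(log)))  # [{'x': 1}, {'y': 2}]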
# -*- coding: utf-8 -*- ############################################################################### # # RetrieveCoupon # Retrieves a coupon with specified coupon id. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class RetrieveCoupon(Choreography): def __init__(self, temboo_session): """ Create a new instance of the RetrieveCoupon Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(RetrieveCoupon, self).__init__(temboo_session, '/Library/Stripe/Coupons/RetrieveCoupon') def new_input_set(self): return RetrieveCouponInputSet() def _make_result_set(self, result, path): return RetrieveCouponResultSet(result, path) def _make_execution(self, session, exec_id, path): return RetrieveCouponChoreographyExecution(session, exec_id, path) class RetrieveCouponInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the RetrieveCoupon Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_APIKey(self, value): """ Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Stripe) """ super(RetrieveCouponInputSet, self)._set_input('APIKey', value) def set_CouponID(self, value): """ Set the value of the CouponID input for this Choreo. ((required, string) The unique identifier of the coupon you want to retrieve) """ super(RetrieveCouponInputSet, self)._set_input('CouponID', value) class RetrieveCouponResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the RetrieveCoupon Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe) """ return self._output.get('Response', None) class RetrieveCouponChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return RetrieveCouponResultSet(response, path)
36.193182
138
0.686656
[ "Apache-2.0" ]
jordanemedlock/psychtruths
temboo/Library/Stripe/Coupons/RetrieveCoupon.py
3,185
Python
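Executing the generated Choreo follows the usual Temboo pattern of session, input set, and execution. A hedged usage sketch (the credentials, Stripe key, and coupon id below are placeholders, and it assumes the temboo Python SDK is installed with this import layout):

from temboo.core.session import TembooSession
from temboo.Library.Stripe.Coupons.RetrieveCoupon import RetrieveCoupon

# Placeholder Temboo account credentials.
session = TembooSession("ACCOUNT_NAME", "APP_NAME", "APP_KEY")

choreo = RetrieveCoupon(session)
inputs = choreo.new_input_set()
inputs.set_APIKey("sk_test_PLACEHOLDER")  # placeholder Stripe API key
inputs.set_CouponID("25OFF")              # placeholder coupon id

results = choreo.execute_with_results(inputs)
print(results.get_Response())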
# coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import tensorflow as tf


def build_kd_loss(teacher_logits,
                  student_logits,
                  task_balance=0.3,
                  distill_temperature=2.0,
                  labels=None,
                  loss_type='mse'):
    if loss_type == 'mse':
        # mean squared error between the raw logits
        return mse_loss(teacher_logits, student_logits)
    elif loss_type == 'xent':
        # cross entropy against the teacher's softened targets
        return xent_loss(teacher_logits, student_logits, labels,
                         distill_temperature, task_balance)
    else:
        # KL divergence between softened student and teacher distributions
        return kld_loss(teacher_logits, student_logits, labels,
                        distill_temperature, task_balance)


def mse_loss(teacher_logits, student_logits):
    loss = tf.reduce_mean(tf.nn.l2_loss(teacher_logits - student_logits))
    return loss


def xent_loss(teacher_logits, student_logits, labels,
              distill_temperature, task_balance):
    student_task_xent = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.squeeze(labels), logits=student_logits))

    teacher_targets = tf.nn.softmax(teacher_logits / distill_temperature)
    student_distill_xent = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            labels=tf.stop_gradient(teacher_targets), logits=student_logits))

    losses = task_balance * student_task_xent
    losses += (1 - task_balance) * student_distill_xent
    return losses


def kld_loss(teacher_logits, student_logits, labels,
             distill_temperature, task_balance):
    student_task_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.squeeze(labels), logits=student_logits)

    # KL(student || teacher) over the temperature-softened distributions,
    # with additive smoothing inside the logs to avoid log(0).
    student_probs = tf.nn.softmax(student_logits / distill_temperature)
    teacher_probs = tf.nn.softmax(teacher_logits / distill_temperature)
    student_distill = tf.reduce_sum(
        student_probs * (tf.log(student_probs + 1e-5) -
                         tf.log(teacher_probs + 1e-5)))

    losses = task_balance * tf.reduce_mean(student_task_xent)
    losses += (1 - task_balance) * tf.reduce_mean(student_distill)
    return losses


def build_kd_probes_loss(teacher_logits,
                         student_logits,
                         task_balance=0.3,
                         distill_temperature=2.0,
                         labels=None,
                         loss_type='mse'):
    teacher_n_layers = len(teacher_logits) - 1
    student_n_layers = len(student_logits) - 1
    probes_kd_loss = 0.0
    for i in range(student_n_layers):
        # Map each student layer to a proportionally-placed teacher layer.
        proportional_layer_idx = int(math.ceil(i * teacher_n_layers / student_n_layers))
        student_layer_logits = student_logits[i]
        teacher_layer_logits = teacher_logits[proportional_layer_idx]
        probes_kd_loss += build_kd_loss(teacher_logits=teacher_layer_logits,
                                        student_logits=student_layer_logits,
                                        task_balance=task_balance,
                                        distill_temperature=distill_temperature,
                                        labels=labels,
                                        loss_type=loss_type)
    return probes_kd_loss
40.717391
91
0.657501
[ "MIT" ]
NoLoPhe/Kaleido-BERT
easytransfer/losses/kd_loss.py
3,746
Python
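The role of the distillation temperature is clearest in plain NumPy. A self-contained sketch of the softened distributions and the KL term that the corrected kld_loss sums (toy logits for illustration, not the TensorFlow graph code above):

import numpy as np

def softmax(z, t=1.0):
    """Temperature-scaled softmax; larger t gives a softer distribution."""
    z = np.asarray(z, dtype=float) / t
    e = np.exp(z - z.max())
    return e / e.sum()

teacher_logits = np.array([4.0, 1.0, 0.2])
student_logits = np.array([3.0, 1.5, 0.1])
T = 2.0  # distillation temperature

p_teacher = softmax(teacher_logits, T)
p_student = softmax(student_logits, T)

# KL(student || teacher) with the same 1e-5 smoothing as kld_loss.
kl = np.sum(p_student * (np.log(p_student + 1e-5) - np.log(p_teacher + 1e-5)))
print(p_teacher, p_student, kl)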
# -*- coding: utf-8 -*- """Look command.""" # Part of Clockwork MUD Server (https://github.com/whutch/cwmud) # :copyright: (c) 2008 - 2017 Will Hutcheson # :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt) from .. import Command, COMMANDS from ...characters import CharacterShell @COMMANDS.register class LookCommand(Command): """A command to allow a character to look at things.""" def _action(self): char = self.session.char if not char: self.session.send("You're not playing a character!") return if not char.room: self.session.send("You're not in a room!") return char.show_room() CharacterShell.add_verbs(LookCommand, "look", "l")
26.892857
73
0.640106
[ "MIT" ]
whutch/atria
cwmud/core/commands/info/look.py
753
Python
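The @COMMANDS.register decoration above is a plain registry-decorator pattern. A minimal self-contained sketch of the idea (not cwmud's actual Command machinery):

class Registry:

    def __init__(self):
        self._classes = {}

    def register(self, cls):
        """Class decorator: record the class by name and return it unchanged."""
        self._classes[cls.__name__] = cls
        return cls

COMMANDS = Registry()

@COMMANDS.register
class LookCommand:
    pass

print("LookCommand" in COMMANDS._classes)  # True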
# coding: utf-8 # # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file # except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # the specific language governing permissions and limitations under the License. # import pprint import re # noqa: F401 import six import typing from enum import Enum if typing.TYPE_CHECKING: from typing import Dict, List, Optional, Union, Any from datetime import datetime class Status(Enum): """ Status of a resource. Allowed enum values: [FAILED, IN_PROGRESS, SUCCEEDED] """ FAILED = "FAILED" IN_PROGRESS = "IN_PROGRESS" SUCCEEDED = "SUCCEEDED" def to_dict(self): # type: () -> Dict[str, Any] """Returns the model properties as a dict""" result = {self.name: self.value} return result def to_str(self): # type: () -> str """Returns the string representation of the model""" return pprint.pformat(self.value) def __repr__(self): # type: () -> str """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): # type: (Any) -> bool """Returns true if both objects are equal""" if not isinstance(other, Status): return False return self.__dict__ == other.__dict__ def __ne__(self, other): # type: (Any) -> bool """Returns true if both objects are not equal""" return not self == other
26.676471
96
0.642227
[ "Apache-2.0" ]
alexa-labs/alexa-apis-for-python
ask-smapi-model/ask_smapi_model/v1/skill/status.py
1,814
Python
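The generated enum's to_dict simply maps the member name to its value, which a quick stdlib check illustrates (a mirror of the class above, not the ask-smapi-model package itself):

from enum import Enum

class Status(Enum):
    FAILED = "FAILED"
    IN_PROGRESS = "IN_PROGRESS"
    SUCCEEDED = "SUCCEEDED"

    def to_dict(self):
        # One-entry dict keyed by the member name.
        return {self.name: self.value}

print(Status.IN_PROGRESS.to_dict())  # {'IN_PROGRESS': 'IN_PROGRESS'}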
""" Modified from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/storage.py """ import torch from torch.utils.data.sampler import BatchSampler from torch.utils.data.sampler import SubsetRandomSampler class RolloutStorage(object): def __init__(self, num_steps, num_processes, obs_shape, action_space, state_size): self.observations = torch.zeros(num_steps + 1, num_processes, *obs_shape) self.states = torch.zeros(num_steps + 1, num_processes, state_size) self.rewards = torch.zeros(num_steps, num_processes, 1) self.value_preds = torch.zeros(num_steps + 1, num_processes, 1) self.returns = torch.zeros(num_steps + 1, num_processes, 1) self.action_log_probs = torch.zeros(num_steps, num_processes, 1) if action_space.__class__.__name__ == 'Discrete': action_shape = 1 else: action_shape = action_space.shape[0] self.actions = torch.zeros(num_steps, num_processes, action_shape) if action_space.__class__.__name__ == 'Discrete': self.actions = self.actions.long() self.masks = torch.ones(num_steps + 1, num_processes, 1) def cuda(self): self.observations = self.observations.cuda() self.states = self.states.cuda() self.rewards = self.rewards.cuda() self.value_preds = self.value_preds.cuda() self.returns = self.returns.cuda() self.action_log_probs = self.action_log_probs.cuda() self.actions = self.actions.cuda() self.masks = self.masks.cuda() def insert(self, step, current_obs, state, action, action_log_prob, value_pred, reward, mask): self.observations[step + 1].copy_(current_obs) self.states[step + 1].copy_(state) self.actions[step].copy_(action) self.action_log_probs[step].copy_(action_log_prob) self.value_preds[step].copy_(value_pred) self.rewards[step].copy_(reward) self.masks[step + 1].copy_(mask) def after_update(self): self.observations[0].copy_(self.observations[-1]) self.states[0].copy_(self.states[-1]) self.masks[0].copy_(self.masks[-1]) def compute_returns(self, next_value, use_gae, gamma, tau): if use_gae: self.value_preds[-1] = next_value gae = 0 for step in reversed(range(self.rewards.size(0))): delta = self.rewards[step] + gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step] gae = delta + gamma * tau * self.masks[step + 1] * gae self.returns[step] = gae + self.value_preds[step] else: self.returns[-1] = next_value for step in reversed(range(self.rewards.size(0))): self.returns[step] = self.returns[step + 1] * \ gamma * self.masks[step + 1] + self.rewards[step] def feed_forward_generator(self, advantages, num_mini_batch): num_steps, num_processes = self.rewards.size()[0:2] batch_size = num_processes * num_steps assert batch_size >= num_mini_batch, "ppo req batch size to be greater than number of mini batches" mini_batch_size = batch_size // num_mini_batch sampler = BatchSampler( SubsetRandomSampler(range(batch_size)), mini_batch_size, drop_last=False) for indices in sampler: indices = torch.LongTensor(indices) if advantages.is_cuda: indices = indices.cuda() observations_batch = self.observations[:-1].view( -1, *self.observations.size()[2:])[indices] states_batch = self.states[:-1].view(-1, self.states.size(-1))[indices] actions_batch = self.actions.view(-1, self.actions.size(-1))[indices] return_batch = self.returns[:-1].view(-1, 1)[indices] masks_batch = self.masks[:-1].view(-1, 1)[indices] old_action_log_probs_batch = self.action_log_probs.view(-1, 1)[indices] adv_targ = advantages.view(-1, 1)[indices] yield observations_batch, states_batch, actions_batch, \ return_batch, masks_batch, old_action_log_probs_batch, adv_targ def 
recurrent_generator(self, advantages, num_mini_batch):
        num_processes = self.rewards.size(1)
        num_envs_per_batch = num_processes // num_mini_batch
        perm = torch.randperm(num_processes)
        for start_ind in range(0, num_processes, num_envs_per_batch):
            observations_batch = []
            states_batch = []
            actions_batch = []
            return_batch = []
            masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []
            for offset in range(num_envs_per_batch):
                ind = perm[start_ind + offset]
                observations_batch.append(self.observations[:-1, ind])
                states_batch.append(self.states[:-1, ind])
                actions_batch.append(self.actions[:, ind])
                return_batch.append(self.returns[:-1, ind])
                masks_batch.append(self.masks[:-1, ind])
                old_action_log_probs_batch.append(
                    self.action_log_probs[:, ind])
                adv_targ.append(advantages[:, ind])
            observations_batch = torch.cat(observations_batch, 0)
            states_batch = torch.cat(states_batch, 0)
            actions_batch = torch.cat(actions_batch, 0)
            return_batch = torch.cat(return_batch, 0)
            masks_batch = torch.cat(masks_batch, 0)
            old_action_log_probs_batch = torch.cat(old_action_log_probs_batch, 0)
            adv_targ = torch.cat(adv_targ, 0)

            yield observations_batch, states_batch, actions_batch, \
                return_batch, masks_batch, old_action_log_probs_batch, adv_targ
46.934783
113
0.574494
[ "MIT" ]
CoAxLab/AdaptiveDecisionMaking_2018
ADMCode/snuz/ppo/storage.py
6,477
Python
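The use_gae branch of compute_returns is the standard generalized advantage estimation recursion. A self-contained NumPy sketch with toy numbers (the class above runs the same loop over torch tensors, with masks zeroing the bootstrap across episode boundaries):

import numpy as np

rewards = np.array([1.0, 0.0, 2.0])
values = np.array([0.5, 0.4, 0.9, 0.3])   # V(s_0)..V(s_T); last is next_value
masks = np.array([1.0, 1.0, 1.0, 1.0])    # 0 where an episode ended
gamma, tau = 0.99, 0.95

returns = np.zeros(4)
gae = 0.0
for t in reversed(range(3)):
    # One-step TD error, then the exponentially-weighted advantage.
    delta = rewards[t] + gamma * values[t + 1] * masks[t + 1] - values[t]
    gae = delta + gamma * tau * masks[t + 1] * gae
    returns[t] = gae + values[t]

print(returns[:3])  # GAE-smoothed returns used as value targets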
# -*- coding: utf-8 -*- from __future__ import unicode_literals import collections from django.db import migrations import mptt import mptt.managers def copy_regulations(apps, schema_editor): Regulation = apps.get_model('regcore', 'Regulation') Document = apps.get_model('regcore', 'Document') for reg in Regulation.objects.all(): data = { field.name: getattr(reg, field.name) for field in Regulation._meta.fields if field.name not in {'parent'} } doc = Document(doc_type='cfr', **data) doc.parent_id = reg.parent_id doc.save() def uncopy_regulations(apps, schema_editor): Regulation = apps.get_model('regcore', 'Regulation') Document = apps.get_model('regcore', 'Document') for doc in Document.objects.filter(doc_type='cfr'): data = { field.name: getattr(doc, field.name) for field in Regulation._meta.fields if field.name not in {'parent'} } reg = Regulation(**data) reg.parent_id = doc.parent_id reg.save() def copy_preambles(apps, schema_editor): Preamble = apps.get_model('regcore', 'Preamble') Document = apps.get_model('regcore', 'Document') # Bind manager manager = mptt.managers.TreeManager() manager.model = Document mptt.register(Document) manager.contribute_to_class(Document, 'objects') for pre in Preamble.objects.all(): write_node(Document, pre.data, 'preamble', pre.data['label']) def uncopy_preambles(apps, schema_editor): Preamble = apps.get_model('regcore', 'Preamble') Document = apps.get_model('regcore', 'Document') # Bind manager manager = mptt.managers.TreeManager() manager.model = Document mptt.register(Document) manager.contribute_to_class(Document, 'objects') for doc in Document.objects.filter(doc_type='preamble', root=True): nodes = doc.get_descendants(include_self=True) data = serialize(nodes[0], build_adjacency_map(nodes)) pre = Preamble(document_number=doc.label_string, data=data) pre.save() # Copy lightly modified import helpers def serialize(pre, adjacency_map): return { 'label': pre.label_string.split('-'), 'text': pre.text, 'node_type': pre.node_type, 'children': [ serialize(child, adjacency_map) for child in adjacency_map.get(pre.id, []) ], } def build_adjacency_map(regs): """Build mapping from node IDs to child records :param regs: List of `Regulation` records """ ret = collections.defaultdict(list) for reg in regs: if reg.parent_id is not None: ret[reg.parent_id].append(reg) return ret def write_node(Document, node, doc_type, label_id, version=None): to_save = [] labels_seen = set() def add_node(node, parent=None): label_tuple = tuple(node['label']) labels_seen.add(label_tuple) node['parent'] = parent to_save.append(node) for child in node['children']: add_node(child, parent=node) add_node(node) DMDocuments(Document).bulk_put(to_save, doc_type, label_id, version) def treeify(node, tree_id, pos=1, level=0): """Set tree properties in memory. 
""" node['tree_id'] = tree_id node['level'] = level node['left'] = pos for child in node.get('children', []): pos = treeify(child, tree_id, pos=pos + 1, level=level + 1) pos = pos + 1 node['right'] = pos return pos def build_id(reg, version=None): if version is not None: return '{}:{}'.format(version, '-'.join(reg['label'])) return '-'.join(reg['label']) class DMDocuments(object): def __init__(self, Document): self.Document = Document def _transform(self, reg, doc_type, version=None): """Create the Django object""" return self.Document( id=build_id(reg, version), doc_type=doc_type, version=version, parent_id=( build_id(reg['parent'], version) if reg.get('parent') else None ), tree_id=reg['tree_id'], level=reg['level'], lft=reg['left'], rght=reg['right'], label_string='-'.join(reg['label']), text=reg['text'], title=reg.get('title', ''), node_type=reg['node_type'], root=(len(reg['label']) == 1), ) def bulk_put(self, regs, doc_type, root_label, version): self.Document.objects.filter( version=version, doc_type=doc_type, label_string__startswith=root_label, ).delete() treeify(regs[0], self.Document.objects._get_next_tree_id()) self.Document.objects.bulk_create( [self._transform(r, doc_type, version) for r in regs], batch_size=25) class Migration(migrations.Migration): dependencies = [ ('regcore', '0011_create_document'), ] operations = [ migrations.RunPython(copy_regulations, uncopy_regulations), migrations.RunPython(copy_preambles, uncopy_preambles), ]
28.769231
72
0.612872
[ "CC0-1.0" ]
18F/regulations-core
regcore/migrations/0012_migrate_documents.py
5,236
Python
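The treeify helper assigns standard MPTT left/right numbers in a pre-order walk. A self-contained check on a tiny tree of plain dicts, mirroring the migration's function:

def treeify(node, tree_id, pos=1, level=0):
    """Assign tree_id/level/left/right to a node and its children."""
    node['tree_id'], node['level'], node['left'] = tree_id, level, pos
    for child in node.get('children', []):
        pos = treeify(child, tree_id, pos=pos + 1, level=level + 1)
    pos = pos + 1
    node['right'] = pos
    return pos

root = {'children': [{'children': []}, {'children': []}]}
treeify(root, tree_id=1)
print(root['left'], root['right'])                           # 1 6
print([(c['left'], c['right']) for c in root['children']])   # [(2, 3), (4, 5)]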
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = [ 'GetWorkspaceConnectionResult', 'AwaitableGetWorkspaceConnectionResult', 'get_workspace_connection', ] @pulumi.output_type class GetWorkspaceConnectionResult: """ Workspace connection. """ def __init__(__self__, auth_type=None, category=None, id=None, name=None, target=None, type=None, value=None): if auth_type and not isinstance(auth_type, str): raise TypeError("Expected argument 'auth_type' to be a str") pulumi.set(__self__, "auth_type", auth_type) if category and not isinstance(category, str): raise TypeError("Expected argument 'category' to be a str") pulumi.set(__self__, "category", category) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if target and not isinstance(target, str): raise TypeError("Expected argument 'target' to be a str") pulumi.set(__self__, "target", target) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) if value and not isinstance(value, str): raise TypeError("Expected argument 'value' to be a str") pulumi.set(__self__, "value", value) @property @pulumi.getter(name="authType") def auth_type(self) -> Optional[str]: """ Authorization type of the workspace connection. """ return pulumi.get(self, "auth_type") @property @pulumi.getter def category(self) -> Optional[str]: """ Category of the workspace connection. """ return pulumi.get(self, "category") @property @pulumi.getter def id(self) -> str: """ ResourceId of the workspace connection. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ Friendly name of the workspace connection. """ return pulumi.get(self, "name") @property @pulumi.getter def target(self) -> Optional[str]: """ Target of the workspace connection. """ return pulumi.get(self, "target") @property @pulumi.getter def type(self) -> str: """ Resource type of workspace connection. """ return pulumi.get(self, "type") @property @pulumi.getter def value(self) -> Optional[str]: """ Value details of the workspace connection. """ return pulumi.get(self, "value") class AwaitableGetWorkspaceConnectionResult(GetWorkspaceConnectionResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetWorkspaceConnectionResult( auth_type=self.auth_type, category=self.category, id=self.id, name=self.name, target=self.target, type=self.type, value=self.value) def get_workspace_connection(connection_name: Optional[str] = None, resource_group_name: Optional[str] = None, workspace_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceConnectionResult: """ Workspace connection. :param str connection_name: Friendly name of the workspace connection :param str resource_group_name: Name of the resource group in which workspace is located. :param str workspace_name: Name of Azure Machine Learning workspace. 
""" __args__ = dict() __args__['connectionName'] = connection_name __args__['resourceGroupName'] = resource_group_name __args__['workspaceName'] = workspace_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20200601:getWorkspaceConnection', __args__, opts=opts, typ=GetWorkspaceConnectionResult).value return AwaitableGetWorkspaceConnectionResult( auth_type=__ret__.auth_type, category=__ret__.category, id=__ret__.id, name=__ret__.name, target=__ret__.target, type=__ret__.type, value=__ret__.value)
33.37415
170
0.63351
[ "Apache-2.0" ]
pulumi/pulumi-azure-nextgen
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
4,906
Python
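Inside a Pulumi program the generated function is called like any other invoke. A hedged usage sketch (the resource names are placeholders, and the import path is assumed from the file's location in the pulumi-azure-nextgen SDK):

import pulumi
from pulumi_azure_nextgen.machinelearningservices import v20200601 as mls

# Placeholder names for an existing Azure ML workspace connection.
conn = mls.get_workspace_connection(
    connection_name="my-connection",
    resource_group_name="my-rg",
    workspace_name="my-workspace")

pulumi.export("connection_target", conn.target)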